diff --git "a/5398.jsonl" "b/5398.jsonl" new file mode 100644--- /dev/null +++ "b/5398.jsonl" @@ -0,0 +1,796 @@ +{"seq_id":"314155614","text":"import sys\nsys.path.append('A:/UBB/1st Year/FP/Assignment 5-7')\nimport pickle\nfrom Repository.DisciplineRepository import DisciplineRepository\nfrom Domain.Discipline import Discipline\nfrom Domain.Discipline import readDisfromLine\nfrom Domain.Discipline import writeDistoLine\n\nclass DisciplineRepositoryPickle(DisciplineRepository):\n\n def __init__(self, readDisfromLine, writeDistoLine, fname):\n self._fname = fname\n DisciplineRepository.__init__(self)\n self._readDisfromLine = readDisfromLine\n self._writeDistoLine = writeDistoLine\n\n def readAllfromFile(self):\n\n with open(self._fname, 'rb') as f:\n while True:\n try:\n line = pickle.load(f)\n dis = self._readDisfromLine(line)\n self.add(dis)\n except EOFError:\n break\n\n def writeAlltoFile(self):\n\n with open(self._fname, 'wb') as f:\n\n for el in self.getAll():\n line = self._writeDistoLine(el)\n pickle.dump(line, f, pickle.HIGHEST_PROTOCOL)\n\n f.close()\n\n# repoDiscipline = DisciplineRepositoryPickle(readDisfromLine, writeDistoLine, 'Discipline.pickle')\n# repoDiscipline.readAllfromFile()\n# for i in repoDiscipline.getAll():\n# print(i)","sub_path":"Assignment 5-7/Repository/DisciplineRepositoryPickle.py","file_name":"DisciplineRepositoryPickle.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"316585962","text":"from sys import argv\r\nif len(argv) != 2 or argv[1] not in [\"a\", \"b\"]:\r\n print(\"Usage: (filename) [a|b]\")\r\n exit(0)\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.special import beta\r\n\r\ndef uniform_posterior(x):\r\n return 2772 * (x**5) * ((1-x)**5)\r\n\r\ndef beta_posterior(x):\r\n a = 8\r\n b = 7\r\n return (x**(a-1)) * ((1-x)**(b-1)) / beta(a, b)\r\n\r\ndomain = np.arange(0.4, 0.6, 0.01)\r\nfunction = uniform_posterior if argv[1] == 'a' else beta_posterior\r\nplt.plot(domain, function(domain))\r\nplt.show()","sub_path":"CS376/HW1/posterior7.py","file_name":"posterior7.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"602355246","text":"from PySide import QtGui, QtCore\nimport operator\nfrom pyrf.devices.thinkrf import discover_wsa\n\nclass DiscoveryWidget(QtGui.QWidget):\n \"\"\"\n A widget based from the Qt QGroupBox widget with a layout containing widgets that\n can be used to display WSA5000's available on the network\n :param name: The name of the groupBox\n :open_device_callback: A function that is called which returns the IP selected\n\n \"\"\"\n def __init__(self, open_device_callback=None, name=\"Discovery Tool\"):\n super(DiscoveryWidget, self).__init__()\n\n self._open_device_callback = open_device_callback\n\n self.setMinimumWidth(400)\n self.setWindowTitle(name)\n dev_layout = QtGui.QVBoxLayout(self)\n\n first_row = QtGui.QHBoxLayout()\n first_row.addWidget(QtGui.QLabel(\"Devices Detected on Local Network\"))\n\n second_row = QtGui.QHBoxLayout()\n second_row.addWidget(self._wsa_list())\n\n ok = self._ok_button()\n\n third_row = QtGui.QHBoxLayout()\n third_row.addWidget(QtGui.QLabel(\"Manually Enter Device IP:\"))\n third_row.addWidget(self._ip_edit())\n\n fourth_row = QtGui.QHBoxLayout()\n fourth_row.addWidget(ok)\n fourth_row.addWidget(self._refresh_button())\n fourth_row.addWidget(self._cancel_button())\n\n 
dev_layout.addLayout(first_row)\n dev_layout.addLayout(second_row)\n dev_layout.addLayout(third_row)\n dev_layout.addLayout(fourth_row)\n self.setLayout(dev_layout)\n self.layout = dev_layout\n\n self.timer = QtCore.QTimer()\n self.timer.timeout.connect(self._refresh_list)\n self.timer.start(10000)\n def return_pressed(self):\n self._ok.click()\n\n def _wsa_list(self):\n self._list = QtGui.QListWidget()\n self._refresh_list()\n\n def list_clicked():\n if self._list.currentItem() is not None:\n self._ip.setText(self._list.currentItem().text().split(\" \")[-1])\n self._list.currentItemChanged.connect(list_clicked)\n return self._list\n\n def _ip_edit(self):\n self._ip = QtGui.QLineEdit()\n self._ip.returnPressed.connect(self.return_pressed)\n return self._ip\n\n def _ok_button(self):\n self._ok = QtGui.QPushButton(\"Connect\")\n def ok_clicked():\n if not self._ip.text() == \"\":\n if self._open_device_callback is not None:\n self._open_device_callback(self._ip.text(), True)\n self.close()\n self._ok.clicked.connect(ok_clicked)\n return self._ok\n\n def _refresh_button(self):\n self._refresh = QtGui.QPushButton(\"Refresh\")\n self._refresh.clicked.connect(self._refresh_list)\n return self._refresh\n\n def _cancel_button(self):\n self._cancel = QtGui.QPushButton(\"Cancel\")\n def cancel_clicked():\n if self._open_device_callback is not None:\n self._open_device_callback(self._ip.text(), False)\n self.close()\n self._cancel.clicked.connect(cancel_clicked)\n return self._cancel\n\n def closeEvent(self, event):\n if self._open_device_callback is not None:\n self._open_device_callback(self._ip.text(), False)\n self.timer.stop()\n\n def _refresh_list(self):\n self._list.clear()\n wsas_on_network = discover_wsa()\n wsas_on_network.sort(key=operator.itemgetter('SERIAL'))\n for wsa in wsas_on_network:\n if \"WSA5000\" in wsa[\"MODEL\"]:\n self._list.addItem(\" \".join([wsa[\"MODEL\"], wsa[\"SERIAL\"], wsa[\"FIRMWARE\"], wsa[\"HOST\"]]))\n elif \"WSA4000\" in wsa[\"MODEL\"]:\n self._list.addItem(\" \".join([wsa[\"MODEL\"], wsa[\"SERIAL\"], wsa[\"HOST\"]]))\n","sub_path":"pyrf/gui/discovery_widget.py","file_name":"discovery_widget.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"360058488","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom django.test import TestCase\nfrom corehq.apps.domain.models import Domain\nfrom corehq.apps.hqcase.utils import update_case\nfrom corehq.apps.locations.models import SQLLocation, LocationType\nfrom corehq.apps.reminders.models import CaseReminder, CaseReminderHandler\nfrom corehq.apps.users.models import CommCareUser\nfrom corehq.form_processor.tests.utils import run_with_all_backends\nfrom corehq.util.test_utils import create_test_case, set_parent_case\nfrom mock import patch\n\n\nclass ReminderRecipientTest(TestCase):\n domain = 'reminder-recipient-test'\n\n def setUp(self):\n self.domain_obj = Domain(name=self.domain)\n self.domain_obj.save()\n\n self.parent_location_type = LocationType.objects.create(\n domain=self.domain,\n name='parent type',\n code='parent'\n )\n\n self.child_location_type = LocationType.objects.create(\n domain=self.domain,\n name='child type',\n code='child',\n parent_type=self.parent_location_type\n )\n\n self.user = CommCareUser.create(self.domain, 'test', 'test')\n\n def tearDown(self):\n self.parent_location_type.delete()\n self.child_location_type.delete()\n self.user.delete()\n 
self.domain_obj.delete()\n\n @run_with_all_backends\n def test_recipient_case_owner_location_parent(self):\n parent_location = SQLLocation.objects.create(\n domain=self.domain,\n name='parent test',\n site_code='parent',\n location_type=self.parent_location_type\n )\n self.addCleanup(parent_location.delete)\n\n child_location = SQLLocation.objects.create(\n domain=self.domain,\n name='child test',\n site_code='child',\n location_type=self.child_location_type,\n parent=parent_location\n )\n self.addCleanup(child_location.delete)\n\n self.user.set_location(child_location)\n\n with create_test_case(self.domain, 'test-case', 'test-name', owner_id=self.user.get_id) as case:\n self.assertEqual(case.owner_id, self.user.get_id)\n handler = CaseReminderHandler(domain=self.domain, recipient='CASE_OWNER_LOCATION_PARENT')\n reminder = CaseReminder(domain=self.domain, case_id=case.case_id)\n\n # Test the recipient is returned correctly\n with patch('corehq.apps.reminders.models.CaseReminder.handler', new=handler):\n self.assertEqual(reminder.recipient, [parent_location])\n\n # Remove parent location\n child_location.parent = None\n child_location.save()\n with patch('corehq.apps.reminders.models.CaseReminder.handler', new=handler):\n self.assertIsNone(reminder.recipient)\n\n # Remove child location\n self.user.unset_location()\n with patch('corehq.apps.reminders.models.CaseReminder.handler', new=handler):\n self.assertIsNone(reminder.recipient)\n\n # Remove case\n reminder.case_id = None\n with patch('corehq.apps.reminders.models.CaseReminder.handler', new=handler):\n self.assertIsNone(reminder.recipient)\n\n @run_with_all_backends\n def test_host_case_owner_location(self):\n parent_location = SQLLocation.objects.create(\n domain=self.domain,\n name='parent test',\n site_code='parent',\n location_type=self.parent_location_type\n )\n self.addCleanup(parent_location.delete)\n\n child_location = SQLLocation.objects.create(\n domain=self.domain,\n name='child test',\n site_code='child',\n location_type=self.child_location_type,\n parent=parent_location\n )\n self.addCleanup(child_location.delete)\n\n with create_test_case(self.domain, 'test-extension-case', 'name') as extension_case:\n with create_test_case(self.domain, 'test-host-case', 'name') as host_case:\n\n update_case(self.domain, host_case.case_id,\n case_properties={'owner_id': child_location.location_id})\n set_parent_case(self.domain, extension_case, host_case, relationship='extension')\n\n handler1 = CaseReminderHandler(domain=self.domain, recipient='HOST_CASE_OWNER_LOCATION')\n handler2 = CaseReminderHandler(domain=self.domain, recipient='HOST_CASE_OWNER_LOCATION_PARENT')\n reminder = CaseReminder(domain=self.domain, case_id=extension_case.case_id)\n\n # Test the recipients are returned correctly\n with patch('corehq.apps.reminders.models.CaseReminder.handler', new=handler1):\n self.assertEqual(reminder.recipient, [child_location])\n\n with patch('corehq.apps.reminders.models.CaseReminder.handler', new=handler2):\n self.assertEqual(reminder.recipient, [parent_location])\n\n # Remove parent location reference\n child_location.parent = None\n child_location.save()\n with patch('corehq.apps.reminders.models.CaseReminder.handler', new=handler2):\n self.assertIsNone(reminder.recipient)\n\n # Test location that does not exist\n update_case(self.domain, host_case.case_id, case_properties={'owner_id': 'does-not-exist'})\n with patch('corehq.apps.reminders.models.CaseReminder.handler', new=handler1):\n self.assertIsNone(reminder.recipient)\n\n # Test 
on a case that is not an extension case\n reminder.case_id = host_case.case_id\n with patch('corehq.apps.reminders.models.CaseReminder.handler', new=handler1):\n self.assertIsNone(reminder.recipient)\n\n # Test on a blank case id\n reminder.case_id = None\n with patch('corehq.apps.reminders.models.CaseReminder.handler', new=handler1):\n self.assertIsNone(reminder.recipient)\n","sub_path":"corehq/apps/reminders/tests/test_recipient.py","file_name":"test_recipient.py","file_ext":"py","file_size_in_byte":6162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"249422833","text":"import sys\nimport time\nimport math\n\n#calculate distance between two points\ndef calcDistance(x1,y1,x2,y2):\n dist = math.sqrt((x2-x1)**2 + (y2-y1)**2)\n return dist\n\n#test for any command line arguments\n#remove .txt extension from argument\ntry:\n arg1 = sys.argv[1]\n print(\"Now testing: \" + arg1 + \"\\n\")\n outputfilename = arg1\n outputfilename = outputfilename.replace(\".txt\",\"\")\n #print(\"parsedname: \")\n #print(outputfilename)\nexcept:\n print(\"Error: No test file given.\\n\")\n sys.exit(1)\n\n\nbegin_time = time.time()\nnum_pts = 0\n\n##################################\n#### POPULATE COORDINATE LIST#####\n##################################\n#initialize list\ncoord_by_x = []\n#coord_by_y = []\n#open file (arg1)\ntest_data = open(arg1, 'r')\n#go through each line of file\nfor line in test_data:\n #grab line from file and convert to list of float\n coord_xy = map(float, line.split())\n #add the coordinates to list\n coord_by_x.append(coord_xy)\n #coord_by_y.append(coord_xy)\n #print(coord_xy)\n num_pts += 1\n\ntest_data.close()\n#################################\n#################################\n\n#############################\n# SORT LIST BY X COORDINATE #\n#############################\ncoord_by_x.sort(key=lambda elem: elem[0])\n#############################\n# SORT LIST BY Y COORDINATE #\n#############################\n#coord_by_y.sort(key=lambda elem: elem[1])\n\n#print(coord_by_x)\n#print(\"\\n\")\n#print(coord_by_y)\n\n#################################\n# CALCULATE MIN DISTANCE (BRUTE)#\n#################################\ndef brute_closestneighbor(total_pts):\n#initialize closest neighbor to infinity\n min_dis = float(\"inf\")\n \n for i in range(0,total_pts):\n x1 = coord_by_x[i][0]\n y1 = coord_by_x[i][1]\n #print(\"COORD1:\" + str(x1) + \" \" + str(y1))\n for j in range(i+1,total_pts):\n x2 = coord_by_x[j][0]\n y2 = coord_by_x[j][1]\n #print(\"COORD2:\" + str(x2) + \" \" + str(y2))\n cur_dis = calcDistance(x1,y1,x2,y2)\n if cur_dis < min_dis:\n min_dis = cur_dis\n\n return min_dis\n\n#list passed in is already sorted\n#pass in lists sorted by X coord\ndef get_medianX(sorted_list):\n size = len(sorted_list)\n if size < 1:\n return None\n if size %2 == 1:\n #return x coord of middle coord\n return sorted_list[((size+1)/2)-1][0]\n else:\n #return calculated median\n leftx = sorted_list[(size/2)-1][0]\n rightx = sorted_list[(size/2)][0]\n return float(leftx+rightx)/2.0\n \n\n#pass in lists sorted by Y coord\ndef get_medianY(sorted_list):\n size = len(sorted_list)\n if size < 1:\n return None\n if size %2 == 1:\n #return x coord of middle coord\n return sorted_list[((size+1)/2)-1][1]\n else:\n #return calculated median\n lefty = sorted_list[(size/2)-1][1]\n righty = sorted_list[(size/2)][1]\n return float(lefty+righty)/2.0\n\n\n\n###############################################\n# CALCULATE MIN DISTANCE (DIVIDE AND CONQUER) 
#\n###############################################\ndef closest_neighbor_DaC(sortX):\n #get number of coord in X\n numrows = len(sortX)\n #if data set small enough use brute closestneighbor\n if numrows <= 3:\n return brute_closestneighbor(numrows)\n \n #divide pts in sortX into two subset\n median = get_medianX(sortX)\n \n #split sorted list of x coords in half\n sub_leftx = [] \n #sortX[:numrows/2]\n sub_rightx = []\n #sortX[numrows/2:]\n for i in range(0,numrows):\n if sortX[i][0] <= median:\n sub_leftx.append(sortX[i])\n else:\n sub_rightx.append(sortX[i])\n \n #recursive call on left and right half \n closest_left = closest_neighbor_DaC(sub_leftx)\n closest_right = closest_neighbor_DaC(sub_rightx)\n #sortX is the full list of points\n closest_distance = 0\n if closest_left < closest_right:\n smallest_dis = closest_left\n else:\n smallest_dis = closest_right\n\n #'Remove' coordinates that lie outside of the 2d wide strip\n #initialize array to be sorted by Y coord\n middle_coords = []\n max_left_dis = median - smallest_dis\n max_right_dis = median + smallest_dis\n \n for j in range(0,numrows):\n if sortX[j][0] > max_left_dis and sortX[j][0] < max_right_dis:\n middle_coords.append(sortX[j])\n \n #sort list of coordinates within middle by Y coordinate \n middle_coords.sort(key=lambda elem: elem[1])\n \n num_in_window = len(middle_coords)\n \n #go through middle 2d wide window\n # and calculate distances\n for x in range(0,num_in_window):\n coord_x1 = middle_coords[x][0]\n coord_y1 = middle_coords[x][1]\n #maximum of 8 points can lie within middle window\n next_y = x+1\n max_y = x+7\n y = next_y\n while y != max_y and y < num_in_window:\n coord_x2 = middle_coords[y][0]\n coord_y2 = middle_coords[y][1]\n current_dis = calcDistance(coord_x1,coord_y1,coord_x2,coord_y2)\n #set new minimum distance if smaller\n if calcDistance(coord_x1,coord_y1,coord_x2,coord_y2) < smallest_dis:\n smallest_dis = current_dis\n y = y+1\n \n \n return smallest_dis\n\n\n##############################################\n##### GET CLOSEST NEIGHBOR ###################\n##############################################\n#closest_neigh = brute_closestneighbor(num_pts)\nclosest_neigh = closest_neighbor_DaC(coord_by_x)\n\n#########################\n# WRITE RESULTS TO FILE #\n#########################\n#getopt.getopt(args,options[,longoptions])\n#openfile distance.txt and give permissions to write to file\noutputfilename = outputfilename + \"_distance.txt\"\ndistance_file = open(outputfilename,'w+');\n#write to file distance.txt\n#convert distance to string and write to file\ndistance_file.write(str(closest_neigh))\n#close file distance.txt\ndistance_file.close()\n\n#calculate execution time and print\n#end_time = round(time.time() - begin_time, 2)\n#use round to not get crazy decimals for time\nend_time = time.time() - begin_time\nprint(\"CLOSEST_NEIGHBOR: \" + str(closest_neigh))\nprint(\"Execution Time: %s \\n \" % end_time)\n","sub_path":"nearest_neighbor.py","file_name":"nearest_neighbor.py","file_ext":"py","file_size_in_byte":6273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"597765399","text":"import os\nimport urllib\nfrom io import BytesIO\n\nfrom PIL import Image, ImageDraw, ImageFont, ImageSequence\nfrom django.contrib import auth\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import render, get_list_or_404, 
get_object_or_404\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch_dsl import Search\n\nfrom .models import AsciiEmoji, Tag, EmojiSeries, SeriesElement\nfrom .models import User\nfrom .myForms import RegisterForm, LoginForm\n\n\n# Create your views here.\ndef index(request):\n return render(request, 'index.html')\n\n\ndef validate(request):\n if request.method == 'GET':\n form = LoginForm(request.GET)\n if form.is_valid():\n user = form.cleaned_data['user']\n psd = form.cleaned_data['password']\n user = authenticate(username=user, password=psd)\n if user is not None:\n if user.is_active:\n auth.login(request, user)\n return render(request, 'index.html', {\n 'user': user\n })\n else:\n return HttpResponse(\"用户不可用\")\n else:\n return HttpResponse(\"用户未验证\")\n else:\n return HttpResponse(\"表单无效\")\n\n\ndef register(request):\n registerForm = RegisterForm()\n return render(request, 'register.html', {\n 'registerForm': registerForm\n })\n\n\ndef createUser(request):\n if request.method == 'POST':\n form = RegisterForm(request.POST)\n if form.is_valid():\n user = form.cleaned_data['user']\n psd = form.cleaned_data['password']\n email = form.cleaned_data['email']\n user = User.objects.create_user(username=user, password=psd, email=email)\n user.save()\n return render(request, 'index.html', {\n 'user': user\n })\n else:\n return HttpResponse('not validate')\n else:\n print('not post')\n\n\ndef login(request):\n form = LoginForm()\n return render(request, 'login.html', {\n 'form': form\n })\n\n\ndef search(request):\n keyword = request.GET['content']\n if keyword:\n emoji_list = []\n mode = request.GET['search-type']\n # 连接到阿里云主机上的es进行查询\n es = Elasticsearch(hosts=[\"120.77.148.37\"])\n # 指定index和doc_type\n search = Search(using=es, index='emoji', doc_type='emojis')\n # 关键字匹配,并返回所有结果\n if mode == 'all':\n res = search.query('match', type=keyword).execute()\n res = search.query('match', type=keyword)[0:res.hits.total].execute()\n elif mode == 'static':\n res = search.query('match', type=keyword).exclude('term', filetype='gif').execute()\n res = search.query('match', type=keyword).exclude('term', filetype='gif')[0:res.hits.total].execute()\n elif mode == 'gif':\n res = search.query('match', type=keyword).query('match', filetype='gif').execute()\n res = search.query('match', type=keyword).query('match', filetype='gif')[0:res.hits.total].execute()\n\n # 把查询结果以字典的形式返回,并提取出需要的数据\n dict = res.to_dict()['hits']['hits']\n # 将提取的数据添加到emoji_list\n for item in dict:\n emoji_list.append(item['_source']['path'])\n\n return render(request, 'result.html',\n {'keyword': keyword, 'emoji_list': emoji_list})\n else:\n # 返回主页并提示没有输入任何关键字\n return render(request, 'index.html',\n {'error_message': \"你还没输入任何关键字呢 ╮(╯▽╰)╭\"})\n\n\n# 返回数据库中类似的关键词,生成下拉提示框\ndef ajax(request):\n result = []\n es = Elasticsearch(hosts=[\"120.77.148.37\"])\n # 指定index和doc_type\n search = Search(using=es, index='emoji', doc_type='emojis')\n # 关键字匹配,并返回至多100个结果\n res = search.query('match', type=request.GET['data']).execute()\n # 把查询结果以字典的形式返回,并提取出需要的数据\n dict = res.to_dict()['hits']['hits']\n # 将提取的数据添加到emoji_list\n for item in dict:\n result.append(item['_source']['type'])\n\n data = {\n 'result': list(set(result))\n }\n return JsonResponse(data)\n\n\ndef asciiAjax(request):\n result = Tag.objects.filter(name__icontains=request.GET['data']).values_list('name')\n data = {\n 'result': list(result)\n }\n return JsonResponse(data)\n\n\ndef asciiEmoji(request, page):\n # 返回数据库里的前30个标签\n tag_per_page = 30\n tag_list = get_list_or_404(Tag)\n 
pages = range(int(len(tag_list) / tag_per_page))\n return render(request, 'asciiEmoji.html', {\n 'tag_list': tag_list[page * tag_per_page:(page + 1) * tag_per_page],\n 'pages': pages,\n })\n\n\ndef asciiEmojiSearch(request):\n keyword = request.GET['content']\n if keyword:\n tag = get_object_or_404(Tag, name=keyword)\n if tag.emojis.count() >= 1:\n tag.hits += 1\n tag.save()\n ascii_list = get_list_or_404(AsciiEmoji, tag=tag)\n return render(request, 'asciiEmoji.html', {\n 'keyword': keyword,\n 'list': ascii_list\n })\n else:\n tag_per_page = 30\n tag_list = get_list_or_404(Tag)\n pages = range(int(len(tag_list) / tag_per_page))\n return render(request, 'asciiEmoji.html', {\n 'error_message': '没有这种颜文字喔,试试其他吧 (*゚ー゚)',\n 'tag_list': tag_list[:tag_per_page],\n 'pages': pages,\n })\n else:\n return render(request, 'asciiEmoji.html', {\n 'error_message': '你还没输入任何关键字呢 ╮(╯▽╰)╭'\n })\n\n\n# 获取某一页的表情系列\ndef emojiSet(reqeust, page):\n page_list = get_list_or_404(EmojiSeries)\n series_per_page = 5\n emoji_per_series = 5\n series_list = page_list[page * series_per_page: page * series_per_page + series_per_page]\n # 获取该系列的前5个表情\n for series in series_list:\n series.elements = get_list_or_404(SeriesElement, series=series)[:emoji_per_series]\n if page > 5:\n # 确定底部的页面导航栏共有几页\n if page < int(len(page_list) / series_per_page) - 4:\n pages = list(range(page - 4, page + 4))\n else:\n pages = list(range(page - 8, page))\n\n pages.insert(0, 0)\n pages.append(int(len(page_list) / series_per_page))\n else:\n pages = list(range(9))\n return render(reqeust, 'EmojiSet.html', {\n 'series_list': series_list,\n 'pages': pages\n })\n\n# 获取一个套图所有的图片\ndef detail(request, series_name):\n series = get_object_or_404(EmojiSeries, name=series_name)\n emojis = get_list_or_404(SeriesElement, series=series)\n return render(request, 'series-detail.html', {\n 'name': series.name,\n 'emojis': emojis,\n })\n\n\n@login_required()\ndef templates(request, page):\n template_dir = '../Crawler/Crawler/datas/material'\n template_list = os.listdir(template_dir)\n row = 4\n column = 6\n max_page = int(len(template_list) / (row * column))\n template_list = template_list[page * row * column:(page + 1) * row * column]\n if page < 5:\n pages = list(range(8))\n pages.append(max_page)\n\n elif page <= max_page - 5:\n pages = list(range(page - 5, page + 5))\n pages.insert(0, 0)\n pages.append(max_page)\n else:\n pages = list(range(page - 5, max_page))\n pages.insert(0, 0)\n # pages = range(int(len(template_list) / (row * column)))\n return render(request, 'templates.html', {\n 'list': template_list,\n 'pages': pages\n })\n\n\n@login_required()\ndef diy(request, img):\n colors = ['red', 'blue', 'purple', 'yellow', 'black', 'orange']\n fonts = ['msyh', 'STCAIYUN', 'FZSTK', 'STHUPO', 'simsunb']\n return render(request, 'diy.html', {\n 'img': img,\n 'colors': colors,\n 'fonts': fonts\n })\n\n\ndef add_text_to_image(image, text, font, color='black'):\n rgba_image = image.convert('RGBA')\n # 生成白底的图片\n text_overlay = Image.new('RGBA', rgba_image.size, (255, 255, 255, 0))\n image_draw = ImageDraw.Draw(text_overlay)\n text_size_x, text_size_y = image_draw.textsize(text, font=font)\n # 设置文本文字位置\n text_xy = ((rgba_image.size[0] - text_size_x) / 2, rgba_image.size[1] - text_size_y - 50)\n # 设置文本的颜色和透明度\n image_draw.text(text_xy, text, font=font, fill=color)\n # 合成\n image_with_text = Image.alpha_composite(rgba_image, text_overlay)\n return image_with_text\n\n\ndef add_text_to_gif(file, image, text, font, color='black', email='None'):\n frames = []\n # Loop 
over each frame in the animated image\n for frame in ImageSequence.Iterator(image):\n # 在每一帧上都添加文字\n d = ImageDraw.Draw(frame)\n # 解决中文乱码问题\n d.ink = 0 + 255 * 256 + 0 * 255 * 256\n\n x_size, y_size = image.size\n text_size_x, text_size_y = d.textsize(text, font=font)\n text_xy = ((x_size - text_size_x) / 2, y_size - text_size_y - 50)\n d.text(text_xy, text=text, font=font, fill=10)\n\n del d\n\n # 以bytesio来保存图片,比以文件保存更高效\n b = BytesIO()\n frame.save(b, format=\"GIF\")\n frame = Image.open(b)\n frames.append(frame)\n\n # 将所有帧组合成一个gif\n if email == \"None\":\n stream = BytesIO()\n frames[0].save(stream, 'GIF', save_all=True, append_images=frames[1:])\n return stream.getvalue()\n else:\n dir = '../Crawler/Crawler/datas/userEmojis'\n if not os.path.exists('%s/%s' % (dir, email)):\n os.makedirs('%s/%s' % (dir, email))\n path = '%s/%s/%s' % (dir, email, file)\n frames[0].save(path, save_all=True, append_images=frames[1:])\n\n\n# 将数据传递到新的html页面,新页面的img发请求生成\n@login_required()\ndef submitData(request):\n if request.method == 'GET':\n material = urllib.parse.unquote(request.GET['material'])[29:]\n text = request.GET['text']\n font_size = request.GET['font-size'][:-2]\n color = request.GET['color']\n font = request.GET['font']\n\n return render(request, 'newEmoji.html', {\n 'name': material[:-4],\n 'material': material,\n 'text': text,\n 'font_size': font_size,\n 'color': color,\n 'font': font,\n 'file': material\n })\n else:\n print(\"not get\")\n\n\ndef process_template(request, material, text, font_size, color, font, email):\n if request.method == 'GET':\n font_dir = 'C:\\Windows\\Fonts'\n font_path = font_dir + '/' + font + '.ttf'\n if not os.path.exists(font_path):\n font_path = font_dir + '/' + font + '.ttc'\n # 浏览器的字体大小和图像的字体大小不一致,需要放缩\n defaultFont = ImageFont.truetype(font_path, int(int(font_size) * 2))\n dir = \"../Crawler/Crawler/datas/material\"\n if material[-3:] == 'gif':\n if email != \"None\":\n add_text_to_gif(material, Image.open(dir + \"/\" + material), text, defaultFont, color, email)\n return HttpResponse(\"保存成功\")\n else:\n img = add_text_to_gif(material, Image.open(dir + \"/\" + material), text, defaultFont, color, \"None\")\n return HttpResponse(img, content_type='image/gif')\n\n else:\n img = add_text_to_image(Image.open(dir + \"/\" + material), text, defaultFont, color)\n if email != \"None\":\n dir = '../../GraduationProject/Crawler/Crawler/datas/userEmojis'\n if not os.path.exists('%s/%s' % (dir, email)):\n os.makedirs('%s/%s' % (dir, email))\n path = '%s/%s/%s' % (dir, email, material.replace(\"jpg\", \"png\"))\n img.save(path)\n return HttpResponse(\"保存成功\")\n else:\n stream = BytesIO()\n img.save(stream, 'PNG')\n return HttpResponse(stream.getvalue(), content_type=\"image/png\")\n","sub_path":"emoji/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"396726826","text":"from django.core.management.base import BaseCommand\nfrom about_me.models import Work, Education, PersonalData, Hobby\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n\n models_lst = Work, Education, PersonalData, Hobby\n for item in models_lst:\n item.objects.all().delete()\n print('Model', str(item).split('.')[-1].rstrip(\"'>\"), 'deleted')\n print('All entries deleted')\n\n works = [\n {\n 'place': \"НИИ телевидения\",\n 'job': \"инженер\",\n \"address\": 'приветливая улица, д.11',\n \"phone\": '555-55-55',\n \"tax_num\": '498354687',\n 'start': 
\"2016-08-01\",\n 'finish': None\n },\n {\n 'place': \"Philips\",\n 'job': \"Создатель наушников\",\n \"address\": 'ветренная улица, д.11',\n \"phone\": '544-44-44',\n \"tax_num\": '845143587',\n 'start': \"2011-08-01\",\n 'finish': \"2012-05-11\"\n },\n {\n 'place': \"80's guy\",\n 'job': \"диск жокей\",\n \"address\": 'уверенная улица, д. 1',\n \"phone\": '565-44-44',\n \"tax_num\": '845143587',\n 'start': \"2010-08-01\",\n 'finish': \"2011-05-11\"\n },\n {\n 'place': \"Дом\",\n 'job': \"домохозяин\",\n \"address\": 'деревянная улица, д. 23',\n \"phone\": '522-41-48',\n \"tax_num\": '84123587',\n 'start': \"2009-08-01\",\n 'finish': \"2010-05-11\"\n },\n {\n 'place': \"Чайник\",\n 'job': \"чайный лист\",\n \"address\": 'приятная улица, д. 12',\n \"phone\": '555-42-21',\n \"tax_num\": '845143423',\n 'start': \"2008-08-01\",\n 'finish': \"2009-05-11\"\n },\n ]\n personal = [\n {\n 'first_name': 'Антон',\n 'middle_name': \"Геннадьевич\",\n 'last_name': \"Чепелев\",\n 'birth_date': \"1994-12-21\"\n }\n ]\n educations = [\n {\n 'place': 'школа №555 \"Белогорье\"',\n \"degree\": \"школьник\",\n 'start': \"2000-09-01\",\n 'finish': \"2011-08-31\"\n },\n {\n 'place': 'СПбГЭТУ \"ЛЭТИ\"',\n \"degree\": \"бакалавр\",\n 'start': \"2011-09-01\",\n 'finish': \"2015-08-31\"\n },\n {\n 'place': 'СПбГЭТУ \"ЛЭТИ\"',\n \"degree\": \"магистр\",\n 'start': \"2015-09-01\",\n 'finish': None\n }\n ]\n hobbies = [\n {\n \"hobby\": 'Python',\n },\n {\n \"hobby\": 'музыка',\n },\n {\n \"hobby\": 'дуракаваляние',\n },\n {\n \"hobby\": 'чтение',\n },\n ]\n\n for item in works:\n Work(**item).save()\n\n for item in personal:\n item['work_place'] = Work.objects.get(finish=None)\n PersonalData(**item).save()\n\n for item in educations:\n Education(**item).save()\n\n for item in hobbies:\n Hobby(**item).save()\n\n print('Default data saved')\n","sub_path":"my_site/about_me/management/commands/fill_db.py","file_name":"fill_db.py","file_ext":"py","file_size_in_byte":3759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"577919517","text":"from energenie.Devices.MiHomeDevice import MiHomeDevice\nfrom energenie.Handlers import HandlerRegistry\nimport energenie.OpenThings as OpenThings\n\n\nSWITCH = {\n \"recs\": [\n {\n \"wr\": True,\n \"paramid\": OpenThings.PARAM_SWITCH_STATE,\n \"typeid\": OpenThings.Value.UINT,\n \"length\": 1,\n \"value\": 0 # FILL IN\n }\n ]\n}\n\n\nclass MIHO005(MiHomeDevice):\n _product_id = 0x02\n _product_name = \"Socket Adapter Plus\"\n _product_description = \"The Adapter Plus allows you to monitor the power being used and control of the attached device.\"\n _product_rf = \"FSK(tx,rx)\"\n _product_url = \"https://energenie4u.co.uk/catalogue/product/MIHO005\"\n _product_image_url = \"https://energenie4u.co.uk/res/images/products/large/MIHO005%20WEBSITE.jpg\"\n _product_user_guide = \"https://energenie4u.co.uk/res/pdfs/ENER035%20user%20guide.pdf\"\n\n \"\"\"An Energenie MiHome Adaptor Plus\"\"\"\n def __init__(self, **kw_args):\n MiHomeDevice.__init__(self, **kw_args)\n\n class Readings():\n switch = None\n voltage = None\n frequency = None\n current = None\n apparent_power = None\n reactive_power = None\n real_power = None\n self.readings = Readings()\n\n self.radio_config.inner_times = 4\n\n @staticmethod\n def join_req(deviceid):\n \"\"\"Get a synthetic join request from this device, for testing\"\"\"\n return MiHomeDevice.join_req(MIHO005._manufacturer_id, MIHO005._product_id, deviceid)\n\n def handle_message(self, payload):\n # 
print(\"MIHO005 new data %s %s\" % (self.device_id, payload))\n for rec in payload[\"recs\"]:\n paramid = rec[\"paramid\"]\n # TODO: consider making this table driven and allowing our base class to fill our readings in for us\n # then just define the mapping table in __init__ (i.e. paramid->Readings field name)\n value = rec[\"value\"]\n if paramid == OpenThings.PARAM_SWITCH_STATE:\n self.readings.switch = ((value is True) or (value != 0))\n HandlerRegistry.handle_reading(self.uuid, 'switch', value)\n elif paramid == OpenThings.PARAM_VOLTAGE:\n self.readings.voltage = value\n HandlerRegistry.handle_reading(self.uuid, 'voltage', value)\n elif paramid == OpenThings.PARAM_CURRENT:\n self.readings.current = value\n HandlerRegistry.handle_reading(self.uuid, 'current', value)\n elif paramid == OpenThings.PARAM_REAL_POWER:\n self.readings.real_power = value\n HandlerRegistry.handle_reading(self.uuid, 'real_power', value)\n elif paramid == OpenThings.PARAM_APPARENT_POWER:\n self.readings.apparent_power = value\n HandlerRegistry.handle_reading(self.uuid, 'apparent_power', value)\n elif paramid == OpenThings.PARAM_REACTIVE_POWER:\n self.readings.reactive_power = value\n HandlerRegistry.handle_reading(self.uuid, 'reactive_power', value)\n elif paramid == OpenThings.PARAM_FREQUENCY:\n self.readings.frequency = value\n HandlerRegistry.handle_reading(self.uuid, 'frequency', value)\n else:\n try:\n param_name = OpenThings.param_info[paramid]['n'] # name\n except:\n param_name = \"UNKNOWN_%s\" % str(hex(paramid))\n print(\"unwanted paramid: %s\" % param_name)\n\n def turn_on(self):\n # TODO: header construction should be in MiHomeDevice as it is shared?\n payload = OpenThings.Message(SWITCH)\n payload.set(header_productid=self.__class__._product_id,\n header_sensorid=self.device_id,\n recs_SWITCH_STATE_value=True)\n self.send_message(payload)\n\n def turn_off(self):\n # TODO: header construction should be in MiHomeDevice as it is shared?\n payload = OpenThings.Message(SWITCH, header=self.__class__.header())\n payload.set(header_productid=self.__class__._product_id,\n header_sensorid=self.device_id,\n recs_SWITCH_STATE_value=False)\n self.send_message(payload)\n\n def set_switch_state(self, state: bool):\n if state:\n self.turn_on()\n else:\n self.turn_off()\n\n def get_switch_state(self) -> bool:\n \"\"\"Last stored state of the switch, might be None if unknown\"\"\"\n return self.readings.switch\n\n # TODO: difference between 'is on and 'is requested on'\n # TODO: difference between 'is off' and 'is requested off'\n # TODO: switch state might be 'unknown' if not heard.\n # TODO: switch state might be 'turning_on' or 'turning_off' if send request and not heard response yet\n\n def is_on(self): # -> boolean\n \"\"\"True, False, or None if unknown\"\"\"\n s = self.get_switch_state()\n if s is None: return None\n return s\n\n def is_off(self): # -> boolean\n \"\"\"True, False, or None if unknown\"\"\"\n s = self.get_switch_state()\n if s is None: return None\n return not s\n\n def get_voltage(self) -> float: # -> voltage:float\n \"\"\"Last stored state of voltage reading, None if unknown\"\"\"\n if self.readings.voltage is None:\n raise RuntimeError(\"No voltage reading received yet\")\n return self.readings.voltage\n\n def get_frequency(self) -> float: # -> frequency:float\n \"\"\"Last stored state of frequency reading, None if unknown\"\"\"\n if self.readings.frequency is None:\n raise RuntimeError(\"No frequency reading received yet\")\n return self.readings.frequency\n\n def get_apparent_power(self) -> 
float: # ->power:float\n \"\"\"Last stored state of apparent power reading, None if unknown\"\"\"\n if self.readings.apparent_power is None:\n raise RuntimeError(\"No apparent power reading received yet\")\n return self.readings.apparent_power\n\n def get_reactive_power(self) -> float: # -> power:float\n \"\"\"Last stored state of reactive power reading, None if unknown\"\"\"\n if self.readings.reactive_power is None:\n raise RuntimeError(\"No reactive power reading received yet\")\n return self.readings.reactive_power\n\n def get_real_power(self) -> float: # -> power:float\n \"\"\"Last stored state of real power reading, None if unknown\"\"\"\n if self.readings.real_power is None:\n raise RuntimeError(\"No real power reading received yet\")\n return self.readings.real_power\n","sub_path":"energenie/Devices/MIHO005.py","file_name":"MIHO005.py","file_ext":"py","file_size_in_byte":6679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"70458626","text":"from rest_framework import generics\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom .models import CadastralInfo\nfrom .serializers import CadastralListCreateSerializer, CadastralRetrieveSerializer\nfrom .tasks import get_cadastral_polygon\nfrom .permissions import IsCadastralOwner\n\n\nclass CadastralListCreateView(generics.ListCreateAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = CadastralListCreateSerializer\n\n def get_queryset(self):\n return CadastralInfo.objects.filter(\n user_id=self.request.user.id,\n )\n\n def perform_create(self, serializer):\n cadastral_info = serializer.save(user=self.request.user)\n\n get_cadastral_polygon.delay(cadastral_info.pk)\n\n\nclass CadastralRetrieveView(generics.RetrieveAPIView):\n permission_classes = (IsAuthenticated, IsCadastralOwner)\n serializer_class = CadastralRetrieveSerializer\n queryset = CadastralInfo.objects.all()\n lookup_url_kwarg = 'cadastral_id'\n","sub_path":"agronom/cadastral/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"344725560","text":"#!/usr/bin/env python3 \n\ndef printer():\n counter = 0\n while True:\n string = (yield)\n print('[{0}] {1}'.format(counter, string))\n counter += 1\n\nif __name__ == '__main__':\n p = printer()\n next(p)\n p.send('Hi')\n p.send('My name is xuwq.')\n p.send('Bye!')\n","sub_path":"asyncio_coroutine/test_send_object_to_yield.py","file_name":"test_send_object_to_yield.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"141551644","text":"import sys, os, glob, json\nfrom collections import OrderedDict\n\ndef recEncode(obj):\n if isinstance(obj, basestring):\n return obj.encode('utf-8')\n elif isinstance(obj, OrderedDict):\n return OrderedDict((recEncode(k), recEncode(v)) for k,v in obj.items())\n elif isinstance(obj, dict):\n return {recEncode(k): recEncode(v) for k,v in obj.iteritems()}\n elif isinstance(obj, list):\n return [recEncode(x) for x in obj]\n else:\n return obj\n\nidentities = glob.glob(sys.argv[1] + '/*/*/')\n\nfor identity in identities:\n print(identity)\n identity_data_file = identity + 'identity_data.json'\n image_data_file = identity + 'image_data.json'\n faces_file = identity + 'faces.json'\n\n if os.path.exists(identity_data_file):\n with open(identity_data_file, 'r') as fp:\n identity_data = json.load(fp, 
object_pairs_hook=OrderedDict)\n if 'religions' in identity_data:\n identity_data['religion'] = identity_data['religions']\n del identity_data['religions']\n if 'article_image' in identity_data:\n identity_data['article_image'] = identity_data['article_image'].replace('_', ' ')\n identity_data = recEncode(identity_data)\n with open(identity_data_file, 'w') as fp:\n json.dump(identity_data, fp, ensure_ascii=False)\n if os.path.exists(image_data_file):\n with open(image_data_file, 'r') as fp:\n image_data = json.load(fp)\n image_data = recEncode(image_data)\n with open(image_data_file, 'w') as fp:\n json.dump(image_data, fp, ensure_ascii=False)\n if os.path.exists(faces_file):\n with open(faces_file, 'r') as fp:\n faces = json.load(fp)\n faces = recEncode(faces)\n with open(faces_file, 'w') as fp:\n json.dump(faces, fp, ensure_ascii=False)\n","sub_path":"scripts/misc/fix_identity_json_files.py","file_name":"fix_identity_json_files.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"344029244","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 8 12:53:10 2019\n\n@author: lenamy\n\"\"\"\n\nimport elephant\n\nfrom elephant.spike_train_generation import homogeneous_poisson_process \n\nfrom quantities import Hz, s, ms \n\n \n\n \n\n# create n spike trains with given rate \n\nn = 18\n\nrate_avg = 0.000614316*1000 # Hz \n\nspiketrain_list = [homogeneous_poisson_process(rate=rate_avg*Hz, t_start=0.0*ms, t_stop=3000.0*ms) for i in range(n)] \n\n \n\n#print(spiketrain_list) \n\n \n\nimport matplotlib.pyplot as plt \n\nimport numpy as np \n\n \n\nwidth = 30+1 \n\nkernel = np.hanning(width) \n\nkernel = kernel/kernel.sum() \n\n \n\nt = np.linspace(-0.1, 3001, 10000) \n\nprint(len(t)) \n\n \n\nfiring_rate = np.zeros((len(t),n)) \n\nSmoothFiringRates = np.zeros((len(t),n)) \n\nj = 0 \n\nfor i, spiketrain in enumerate(spiketrain_list): \n\n #plt.plot(spiketrain, i * np.ones_like(spiketrain), 'k*', markersize=2) \n\n entries = np.digitize(spiketrain, t) \n\n firing_rate[entries,j] = 1 \n\n SmoothFiringRates[:,j] = np.convolve(firing_rate[:,j], kernel, mode='same') \n\n plt.plot(t, SmoothFiringRates[:,j]) \n\n #plt.show() \n\n j +=1 \n\n \n\n \n\n \n\npopname = \"e4Nr5a1\" \n\noutfile = \"/home/lenamy/Documents/Thesis/python_output/poisson/poisson_%s_subset_of_%d.txt\" % (popname, n) \n\n \n\nwith open(outfile, 'w+') as datafile_id: \n\n np.savetxt(datafile_id, SmoothFiringRates, delimiter = '\\t') \n\n#plt.axis('tight') \n\n#plt.xlim(0, 1000) \n\n#plt.xlabel('Time (ms)', fontsize=16) \n\n#plt.ylabel('Spike Train Index', fontsize=16) \n\n#plt.gca().tick_params(axis='both', which='major', labelsize=14) \n\n#plt.show() ","sub_path":"Thesis/python_scripts/poisson_generation.py","file_name":"poisson_generation.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"605241852","text":"from otree.api import (\n models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,\n Currency as c, currency_range\n)\nimport itertools, random\nimport numpy as np\n\n\n\nauthor = 'Andrea Guido and Anthropolab IT Team - Lille (Antoine Demyer and Flovic Gosselin)'\n\ndoc = \"\"\"\nLabour Market Game\n\"\"\"\n\n\n\nclass Constants(BaseConstants):\n\n name_in_url = 'Beliefs_game_asymmetric_info'\n players_per_group = 2\n num_rounds = 23 #3 trials + 20 of the real game\n Endowment = 12\n 
instructions_template = 'Andy_asymmetric_info/Summary.html'\n Endowmenthigh= 16 #endowment after positive shock\n Endowmentlow=8 #endowment after negative shock\n round_specials = [8,13,14,18] #these are the rounds with belief elicitation\n bonus_if_in_internal = 5 #this is the belief payment\n\nclass Subsession(BaseSubsession):\n\n draw = models.IntegerField() #1 for positive shock 0 for negative shock #random draw to decide whether + or - shock\n min_wage = models.IntegerField() # 0 for firing treatment, 1 for baseline\n matching = models.CharField()\n online = models.IntegerField()\n def creating_session(self):\n self.draw = self.session.config['treatment']\n self.online = self.session.config['online']\n self.matching = self.session.config['matching']\n self.min_wage = self.session.config['minimum_wage']\n print(\"********This is DRAW the treatment********\", self.draw)\n if self.matching == 'P':\n for g in self.get_groups():\n for p in g.get_players():\n p.type = ['principal', 'agent'][p.id_in_group - 1]\n if self.round_number == 1:\n self.group_randomly(fixed_id_in_group=True)\n print(\"this is the group matrix for round 1\", self.get_group_matrix())\n elif self.round_number < 4:\n self.group_like_round(1)\n elif self.round_number == 4:\n self.group_randomly(fixed_id_in_group=True)\n self.group_like_round(4)\n else:\n self.group_like_round(4)\n print(\"this is the group matrix for round 4\", self.get_group_matrix())\n elif self.matching == 'S':\n for g in self.get_groups():\n for p in g.get_players():\n p.type = ['principal', 'agent'][p.id_in_group - 1]\n self.group_randomly(fixed_id_in_group=True)\n print(\"this is the group matrix for every round\", self.get_group_matrix())\n\n\nclass Group(BaseGroup):\n actuale = models.IntegerField(initial=Constants.Endowment, min=1, max=100) #endowment\n wage = models.IntegerField(min=0) #the wage snet by the firm\n effort = models.FloatField(min=0, max=6, widget= widgets.SliderInput(attrs={'step': '0.25'})) #the effort of worker in normal rounds\n steffort= models.FloatField(min=0, max=6, widget= widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\") #effort from strategy method\n\n random_round_payoff_1 = models.IntegerField() #rnd draw to choose decisional period to be paid\n random_round_payoff_2 = models.IntegerField() # \" \" questionnaire period to be paid\n random_task = models.IntegerField() # task paid\n random_line_belief = models.IntegerField() #line of belief table chosen to payment if accurate\n\n##### VARIABLES FOR ASY-POS SHOCK\n shock_revealed = models.IntegerField(initial=0) # flag : if 1 employer has offered wage > 12 after the shock\n shock_page = models.IntegerField(initial=0) # flag to show the shock page to the shock page\n\n\n def get_variables(self): #function to get effort stated in the strategy method\n players = self.get_players()\n for p in players:\n if p.type == 'agent':\n if (self.round_number==8 or self.round_number==13 or self.round_number==14 or self.round_number==18) and self.shock_page == 0:\n if self.wage == 10:\n self.steffort = p.steffort10\n elif self.wage == 2:\n self.steffort= p.steffort2\n elif self.wage == 3:\n self.steffort= p.steffort3\n elif self.wage == 4:\n self.steffort= p.steffort4\n elif self.wage == 5:\n self.steffort= p.steffort5\n elif self.wage == 6:\n self.steffort= p.steffort6\n elif self.wage == 7:\n self.steffort= p.steffort7\n elif self.wage == 8:\n self.steffort= p.steffort8\n elif self.wage == 9:\n self.steffort= p.steffort9\n elif self.wage == 11:\n self.steffort= 
p.steffort11\n elif self.wage == 12:\n self.steffort= p.steffort12\n elif self.wage == 13:\n self.steffort= p.steffort13\n elif self.wage == 14:\n self.steffort= p.steffort14\n elif self.wage == 15:\n self.steffort= p.steffort15\n elif self.wage == 16:\n self.steffort= p.steffort16\n else:\n self.steffort= p.steffort1\n\n\n\n def calculate_payoff_final_1(self): #compute payoffs in decisional round\n print(\"***************COMPUTING PAYOFF 1*************\")\n # choose randomly a round\n Liste_round_decisional = random.sample([x for x in list(range(4,Constants.num_rounds+1)) if x not in Constants.round_specials],1)\n self.random_round_payoff_1 = Liste_round_decisional[0] #decisional paid round\n self.random_round_payoff_2 = random.sample(Constants.round_specials,1)[0] #questionnaire paid round\n print(\"so round Decisional is \", self.random_round_payoff_1)\n print(\"so round Questionnaire is \", self.random_round_payoff_2)\n\n print(\"Direct response method is going on\")\n\n # direct-repsponse method\n for p in self.get_players():\n print(\"the type of the player\", p.type)\n\n if p.type == \"agent\":\n p.final_payoff = float(p.in_round(self.random_round_payoff_1).payoff)\n p.payoff_decisional = p.final_payoff\n print(\"This is the payoff of decisional \", p.payoff_decisional)\n else:\n p.final_payoff = float(p.in_round(self.random_round_payoff_1).payoff)\n p.payoff_decisional = p.final_payoff\n print(\"This is the payoff of decisional \", p.payoff_decisional)\n\n def calculate_payoff_final_2(self): # compute payoffs for questionnaire rounds\n print(\"***************** I'm in the method calculate payoff final 2 *****************\")\n self.random_task = np.random.choice(\n [0, 1], 1,\n p=[0.5, 0.5])[0]\n print(\"This is the method chosen (BELIEF (1) vs STRATEGY (0) ):\", self.random_task)\n\n if self.random_task == 1:\n # Belief elicitation method\n for p in self.get_players():\n\n if p.type == \"agent\":\n\n print(\"the type is the agent\", p.type)\n print(\"this is the wage chosen for the strategy method\",\n self.in_round(self.random_round_payoff_2).wage)\n print(\"this is the wagebelief \" ,\n p.in_round(self.random_round_payoff_2).wagebelief)\n\n # belief accuracy\n if p.in_round(self.random_round_payoff_2).wagebelief == self.in_round(\n self.random_round_payoff_2).wage:\n print(\"WORKER's beliefs is In the interval\")\n p.belief_in_interval = 1\n # PAYOFF ASSIGNINIG\n p.payoff_questionnaire = Constants.bonus_if_in_internal\n p.final_payoff += Constants.bonus_if_in_internal\n print(\"This is the payoff of questionnaire \", p.payoff_questionnaire)\n else:\n print(\"WORKER' belief is Out of interval\")\n p.payoff_questionnaire = 0\n p.belief_in_interval = 0\n print(\"This is the payoff of questionnaire \", p.payoff_questionnaire)\n\n else:\n\n print(\"the type is principal\", p.type)\n\n # draw a random line of the table\n ## if thew shock is positive and has been revealed, then draw from all the lines, otherwise\n if self.subsession.draw == 1:\n if self.in_round(self.random_round_payoff_2).shock_revealed == 1:\n self.random_line_belief = random.randint(1,\n self.in_round(\n self.random_round_payoff_2).actuale)\n else:\n self.random_line_belief = random.randint(1, 12)\n else:\n self.random_line_belief = random.randint(1,\n self.in_round(\n self.random_round_payoff_2).actuale)\n\n agent = p.get_others_in_group()[0]\n\n print(\"this is the random line chosen from the table in round 2 \",\n self.random_line_belief)\n\n # extract beliefs from the table\n belief_effort = 
getattr(p.in_round(self.random_round_payoff_2),\n 'eleffort{}'.format(self.random_line_belief))\n print(\"this is belief effort\", belief_effort)\n #extract strategy effort\n strategy_effort = getattr(agent.in_round(self.random_round_payoff_2),\n 'steffort{}'.format(self.random_line_belief))\n print(\"this is strategy effort\", strategy_effort)\n\n # make the interval\n upper_bound_belief_effort = strategy_effort + 0.25\n lower_bound_belief_effort = strategy_effort - 0.25\n\n # check if in interval\n if lower_bound_belief_effort <= belief_effort <= upper_bound_belief_effort:\n p.belief_in_interval = 1\n p.payoff_questionnaire = Constants.bonus_if_in_internal\n print(\"This is the payoff of questionnaire \", p.payoff_questionnaire)\n p.final_payoff += Constants.bonus_if_in_internal\n print(\"Employer's beliefs are in the interval\")\n else:\n p.belief_in_interval = 0\n p.payoff_questionnaire = 0\n print(\"This is the payoff of questionnaire \", p.payoff_questionnaire)\n p.final_payoff += 0\n print(\"Employer's beliefs are NOT in the interval\")\n else:\n # strategy effort\n print(\"This is the strategy method\")\n\n # compute payoffs\n for p in self.get_players():\n if p.type == \"agent\":\n p.payoff_questionnaire = float(p.in_round(self.random_round_payoff_2).payoff)\n print(\"This is the payoff of questionnaire \", p.payoff_questionnaire)\n print(\"I'm here calculating WORKER PAYOFF\")\n p.final_payoff += float(p.in_round(self.random_round_payoff_2).payoff)\n print(\"OKAY COMPUTED\", p.final_payoff)\n\n else:\n print(\"I M here computing EMPLOYER's PAYOFF\")\n p.payoff_questionnaire = float(p.in_round(self.random_round_payoff_2).payoff)\n print(\"This is the payoff of questionnaire \", p.payoff_questionnaire)\n p.final_payoff += float(p.in_round(self.random_round_payoff_2).payoff)\n print(\"OKAY COMPUTED\", p.final_payoff)\n\n\n def calculate_payoff(self): #payoffs as the game unravels - not paid\n players= self.get_players()\n\n # this is to define the post shock endowment\n if self.round_number >= 13 and self.subsession.draw == 1:\n high=1\n else:\n high=0\n\n # this is to define thz agent's payoff both in normal and elicitation rounds\n for p in players:\n if p.type== 'agent':\n if self.round_number==8 or self.round_number==13 or self.round_number == 14 or self.round_number==18:\n if self.wage > 0:\n p.payoff = self.wage - ((self.steffort) ** 2) / 2\n else:\n p.payoff = 0\n else:\n if self.wage > 0:\n p.payoff = self.wage - ((self.effort) ** 2) / 2\n else:\n p.payoff = 0\n else:\n\n # this is to define the principal's payoff\n\n if self.round_number <= 13:\n #if self.round_number != 1 and self.round_number !=5 and self.round_number != 10 and self.round_number != 11 and self.round_number != 15:\n # p.payoff = (Constants.Endowment - self.wage)*self.effort\n if self.round_number == 8 :\n if self.wage > 0:\n p.payoff = (Constants.Endowment - self.wage) * self.steffort\n print(\"This is ACTUALE and the round is 1 or 5\", self.actuale)\n else:\n p.payoff = Constants.Endowment/2\n elif self.round_number == 13:\n if self.wage >0:\n p.payoff = (Constants.Endowment - self.wage) * self.steffort\n print(\"This is ACTUALE and the round is 10\", self.actuale)\n else:\n p.payoff = Constants.Endowment/2\n\n # propagate the shock\n if high==1:\n for g in self.in_rounds(self.round_number+1, Constants.num_rounds):\n g.actuale = Constants.Endowmenthigh\n else:\n for g in self.in_rounds(self.round_number+1, Constants.num_rounds):\n g.actuale = Constants.Endowmentlow\n else:\n if self.wage >0:\n p.payoff = 
(Constants.Endowment - self.wage) * self.effort\n        else:\n            p.payoff = Constants.Endowment/2\n    else:\n\n        if (self.round_number == 14 or self.round_number == 18) and self.wage >0 :\n            p.payoff = (self.actuale - self.wage) * self.steffort\n        else:\n            if self.wage >0:\n                p.payoff = (self.actuale - self.wage) * self.effort\n            else:\n                p.payoff = self.actuale/2\n\n    def check_shock_revealed(self):\n        if self.wage > 12 and self.shock_revealed == 0:\n            for g in self.in_rounds(self.round_number, Constants.num_rounds):\n                g.shock_revealed = 1\n            if self.in_round(self.round_number).shock_page == 0:\n                self.in_round(self.round_number).shock_page = 1\n\n    def rounding(self):\n        players= self.get_players()\n        for p in players:\n            p.final_payoff=round(p.final_payoff,2)\n\n    def check_payoff(self):\n        players= self.get_players()\n        for p in players:\n            if p.payoff < 0:\n                p.payoff=0\n\n\nclass Player(BasePlayer):\n    type = models.CharField()\n    belief_in_interval = models.IntegerField()\n\n    #FIRM: elicited effort\n    eleffort1 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}),verbose_name=\"\")\n    eleffort2 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}),verbose_name=\"\")\n    eleffort3 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}),verbose_name=\"\")\n    eleffort4 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}),verbose_name=\"\")\n    eleffort5 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}),verbose_name=\"\")\n    eleffort6 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}),verbose_name=\"\")\n    eleffort7 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}),verbose_name=\"\")\n    eleffort8 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}),verbose_name=\"\")\n    eleffort9 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}),verbose_name=\"\")\n    eleffort10 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}),verbose_name=\"\")\n    eleffort11 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}),verbose_name=\"\")\n    eleffort12 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}),verbose_name=\"\")\n    eleffort13 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}),verbose_name=\"\")\n    eleffort14 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}),verbose_name=\"\")\n    eleffort15 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}),verbose_name=\"\")\n    eleffort16 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}),verbose_name=\"\")\n\n    #WORKER: strategy method effort\n    steffort1 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\")\n    steffort2 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\")\n    steffort3 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\")\n    steffort4 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\")\n    steffort5 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\")\n    steffort6 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\")\n    steffort7 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\")\n    steffort8 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\")\n    steffort9 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\")\n    steffort10 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\")\n    steffort11 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\")\n    steffort12 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\")\n    steffort13 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\")\n    steffort14 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\")\n    steffort15 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\")\n    steffort16 = models.FloatField(min=0, max=6, widget=widgets.SliderInput(attrs={'step': '0.25'}), verbose_name=\"\")\n\n    #WORKER: elicited wage\n    wagebelief = models.IntegerField(min=1, verbose_name='')\n    #payoff from questionnaire period chosen randomly\n    payoff_questionnaire = models.FloatField(initial=0)\n    #payoff from decisional period chosen randomly\n    payoff_decisional = models.FloatField(initial=0)\n    #Payment at the end of the experiment\n    final_payoff = models.FloatField(initial=0)\n    #gender\n    gender = models.IntegerField(choices=[[1,'Male'], [2,'Female'], [3,'Other']], verbose_name='')\n    #education\n    undergrad = models.IntegerField(choices=[[1,'Yes'], [0,'No']],verbose_name='')\n    #treatment check\n    asymmetry_check = models.IntegerField(choices=[[1, 'ONLY EMPLOYERS in this experiment'], [0, 'BOTH EMPLOYERS and WORKERS in this experiment']],\n                                          verbose_name=\"The change in employers' endowment (from $12 to $8) was announced to\",\n                                          blank=True)\n    #comments\n    comments = models.TextField(\n        blank=True,\n        max_length=3000,\n        verbose_name = 'Please, write here any comment concerning the experiment (for example: game length, instructions, game). We would love to hear your opinion about it.'\n\n    )","sub_path":"OnlineExperiment_Asymmetric/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":22270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"250276260","text":"import math\n\ndef square(side):\n\tper = side*4\n\ts = side*2\n\td = math.sqrt(s*2)\n\treturn per,s,d\n\n\nwhile True:\n\tside=int(input(\"Enter square side for the operations\"))\n\tprint(square(side))\n\tx=str(input(\"Enter Exit to stop the Program\"))\n\tif x==\"Exit\":\n\t\tbreak","sub_path":"square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"380712915","text":"\"\"\" Script for creating an inventory yaml \"\"\"\n\nimport os\n\nfrom src import utils\n\n\ndef prepare_cert_path(certification, certification_dir):\n    \"\"\" Prepare the path for a specific certification \"\"\"\n    if not certification_dir:\n        certification_dir = 'exports/certifications/'\n    return os.path.join(certification_dir, '{0}.yaml'.format(certification))\n\n\ndef analyze_attribute(attribute):\n    \"\"\" Check how many elements an attribute has otherwise if it's a list\n    if it's not a list return that it's present otherwise return \"Missing \"\"\"\n    if isinstance(attribute, list) or isinstance(attribute, dict):\n        return len(attribute)\n    elif attribute:\n        return \"Present\"\n    return \"Missing\"\n\n\ndef analyze_component(component):\n    \"\"\" Analyze a component to find gaps in governors and references \"\"\"\n    return {\n        'references': analyze_attribute(component.get('references')),\n        'verifications': analyze_attribute(component.get('verifications')),\n        'documentation_completed': component.get('documentation_complete'),\n    }\n\n\ndef catalog_control(inventory, control, standard_key, control_key):\n    \"\"\" Adds all the components in the control into the inventory\n    while determing the gaps \"\"\"\n    if 'justifications' in control:\n        for component in control['justifications']:\n            system_key = component.get('system', 'No System')\n            component_key = component.get('component', 'No Name')\n            # Catalog component in certification inventory\n            if system_key not in inventory[standard_key][control_key]:\n                inventory[standard_key][control_key][system_key] = {}\n            if component_key not in inventory[standard_key][control_key][system_key]:\n                inventory[standard_key][control_key][system_key][component_key] = {}\n            inventory[standard_key][control_key][system_key][component_key] = {\n                'implementation_status': component.get('implementation_status', 'Missing'),\n                'narrative': analyze_attribute(component.get('narrative')),\n                'references': analyze_attribute(component.get('references'))\n            }\n    else:\n        inventory[standard_key][control_key] = \"Missing Justifications\"\n\n\ndef catalog_component(component, inventory, system_key, component_key):\n    \"\"\" Summarizes the data in the components dict \"\"\"\n    inventory['components'][system_key][component_key] = analyze_component(component)\n\n\ndef inventory_standards(certification, inventory):\n    \"\"\" Populate the inventory for standards \"\"\"\n    for standard_key in certification['standards']:\n        inventory[standard_key] = {}\n        for control_key in certification['standards'][standard_key]:\n            inventory[standard_key][control_key] = {}\n            control = certification['standards'][standard_key][control_key]\n            catalog_control(inventory, control, standard_key, control_key)\n\n\ndef inventory_components(certification, inventory):\n    \"\"\" Populate the inventory for components \"\"\"\n    for system_key in certification['components']:\n        if system_key not in inventory['components']:\n            inventory['components'][system_key] = {}\n        for component_key in certification['components'][system_key]:\n            catalog_component(\n                certification['components'][system_key][component_key],\n                inventory,\n                system_key,\n                component_key\n            )\n\n\ndef build_inventory(certification_path):\n    \"\"\" Create an inventory of components for a specific certification \"\"\"\n    certification = utils.yaml_loader(certification_path)\n    inventory = {\n        'certification': certification.get('name'),\n        'components': {}\n    }\n    inventory_standards(certification, inventory)\n    inventory_components(certification, inventory)\n    return inventory\n\n\ndef create_inventory(certification_path, output_path):\n    \"\"\" Creates an inventory yaml \"\"\"\n    inventory = build_inventory(certification_path)\n    inventory_path = os.path.join(\n        output_path,\n        inventory.get('certification') + '.yaml'\n    )\n    utils.yaml_writer(inventory, inventory_path)\n    return inventory_path\n","sub_path":"src/renderers/inventory_builder.py","file_name":"inventory_builder.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"473130298","text":"#!/usr/bin/env python3\n# vim: set ts=4 sw=4 et smartindent ignorecase fileencoding=utf8:\nimport curses\nimport time\nfrom math import floor\nfrom datetime import datetime\n\nNUM_DOT = (\n    ('***** ',\n     '*   * ',\n     '*   * ',\n     '*   * ',\n     '***** ',),\n    ('    * ',\n     '    * ',\n     '    * ',\n     '    * ',\n     '    * ',),\n    ('***** ',\n     '    * ',\n     '***** ',\n     '*     ',\n     '***** ',),\n    ('***** ',\n     '    * ',\n     '***** ',\n     '    * ',\n     '***** ',),\n    ('*   * ',\n     '*   * ',\n     '***** ',\n     '    * ',\n     '    * ',),\n    ('***** ',\n     '*     ',\n     '***** ',\n     '    * ',\n     '***** ',),\n    ('***** ',\n     '*     ',\n     '***** ',\n     '*   * ',\n     '***** ',),\n    ('***** ',\n     '*   * ',\n     '*   * ',\n     '    * ',\n     '    * ',),\n    ('***** ',\n     '*   * ',\n     '***** ',\n     '*   * ',\n     '***** ',),\n    ('***** ',\n     '*   * ',\n     '***** ',\n     '    * ',\n     '***** ',),\n)\n\nCOLON_DOT = (\n    ('      ',\n     '  *   ',\n     '      ',\n     '  *   ',\n     '      ',)\n)\n\ndef P(stdscr, msg):\n    stdscr.addstr(0, 0, msg)\n    stdscr.refresh()\n    stdscr.getkey()\n\ndef view_dot(stdscr, xs, max_height, dot, scale):\n    width = 5 * scale\n    height = 5 * scale\n    ys = floor((max_height - height) / 2 + 0.5)\n    for y in range(0, int(height)):\n        for x in range(0, int(width)):\n            xn = floor((x + 0.5) / scale)\n            yn = floor((y + 0.5) / scale)\n            ch = dot[yn][xn]\n            stdscr.addch(ys + y, xs + x, ch)\n\ndef view_num(stdscr, xs, max_height, num, scale):\n    view_dot(stdscr, xs, max_height, NUM_DOT[num], scale)\n\ndef view_normal(stdscr, max_width, max_height, now):\n    xs = floor((max_width - 8) / 2 + 0.5)\n    ys = floor((max_height - 5) / 2 + 0.5)\n    msg = now.strftime('%H:%M:%S')\n    stdscr.addstr(ys, xs, msg)\n\ndef main(stdscr):\n    stdscr.nodelay(True)\n    stdscr.clear()\n    height, width = stdscr.getmaxyx()\n    num_width = width // 8\n    scale = num_width / 6\n    try:\n        while True:\n            now = datetime.now()\n            hour = now.hour\n            minutes = now.minute\n            second = now.second\n            if scale < 1.0:\n                view_normal(stdscr, width, height, now)\n            else:\n                view_num(stdscr, 0, height, hour // 10, scale)\n                view_num(stdscr, num_width, height, hour % 10, scale)\n                view_dot(stdscr, num_width * 2, height, COLON_DOT, scale)\n                view_num(stdscr, num_width * 3, height, minutes // 10, scale)\n                view_num(stdscr, num_width * 4, height, minutes % 10, scale)\n                view_dot(stdscr, num_width * 5, height, COLON_DOT, scale)\n                view_num(stdscr, num_width * 6, height, second // 10, scale)\n                view_num(stdscr, num_width * 7, height, second % 10, scale)\n            stdscr.refresh()\n            ch = stdscr.getch()\n            if ch == ord('q'):\n                break\n            elif ch == curses.KEY_RESIZE:\n                height, width = stdscr.getmaxyx()\n                num_width = width // 8\n                scale = num_width / 6\n                stdscr.clear()\n            wait_time = 1.0 - datetime.now().microsecond / 1000000.0\n            time.sleep(wait_time)\n    except KeyboardInterrupt:\n        pass\n\nif __name__ == '__main__':\n    curses.wrapper(main)\n","sub_path":"dclock.py","file_name":"dclock.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"63338816","text":"from setuptools import setup, find_packages\n\nbase_requirements = [\n    'bottlecap',\n    'peewee_extras',\n    'werkzeug>=0.11',\n    'click>=6.2',\n    'schematics>=1.1',\n    'six>=1.10',\n    'peewee>=2.7',\n    'bottle>=0.12',\n    'jinja2>=2.8',\n    'psycopg2>=2.6',\n    'pyjwt>=1.4',\n    'oath>=1.4',\n    'rq>=0.5',\n    'twilio>=5.3'\n]\n\nsetup(\n    name=\"backpack\",\n    description=\"Backpack\",\n    author='Cal Leeming',\n    author_email='cal@iops.io',\n    url='https://github.com/foxx/backpack',\n    version=\"0.4.1\",\n    py_modules=['backpack'],\n    setup_requires=[\n        'pytest-runner>=2.6',\n        'yanc>=0.3'\n    ],\n    install_requires=base_requirements,\n    tests_require=base_requirements+[\n        'pytest-benchmark>=3.0',\n        'pytest-raisesregexp>=2.1',\n        'pytest-cov>=2.2',\n        'pytest>=2.8',\n        'webtest>=2.0',\n        'python-coveralls',\n        'beautifulsoup4>=4.3.2',\n        'freezegun==0.3.5',\n        'requests>=2.9',\n        'names>=0.3',\n        'tox',\n        'fakeredis>=0.6'\n    ],\n    dependency_links=[\n        'git+https://github.com/foxx/bottlecap.git@master#egg=bottlecap-master',\n        'git+https://github.com/foxx/peewee-extras.git@master#egg=peewee_extras-master',\n    ],\n    classifiers=[\n        'Programming Language :: Python :: 2.7',\n        'Programming Language :: Python :: 3.4'\n    ]\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"149600562","text":"import os\nimport sys\nimport random\n\n\ndef write_list(writer, lst):\n    for value in lst:\n        writer.write(\" \" + str(value))\n    writer.write(\"\\n\")\n    return\n\n\ndef rand_list(length):\n    lst = list()\n    for k in range(0, length):\n        lst.append(random.randint(-16, 16))\n    return lst\n\n\ndef generate_tests(file_path):\n    counter = 0\n    with open(file_path, \"w\") as writer:\n        write_list(writer, [])\n        counter += 1\n        for value in range(-12, 12):\n            write_list(writer, [value])\n            counter += 1\n        for x in range(-4, 4):\n            for y in range(-4, 4):\n                write_list(writer, [x, y])\n                counter += 1\n        year = -2000\n        for k in range(0, 10):\n            for month in range(-2, 15):\n                for day in range(-2, 34):\n                    write_list(writer, [year, month, day])\n                    counter += 1\n            year = year + 1\n        for length in range(4, 256):\n            write_list(writer, rand_list(length))\n            counter += 1\n    return counter\n\n\nif __name__ == \"__main__\":\n    print(\"Generate\", generate_tests(sys.argv[1]), \"tests.\")\n","sub_path":"TestData/gentst/check_date.py","file_name":"check_date.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"414720955","text":"import os\nfrom TreeViewItem.TreeViewItem import TreeViewItem\n\nclass TreeViewItemFolder(TreeViewItem):\n    indent = 1\n    dragable = True\n\n    def visit(self, visitor):\n        super(TreeViewItemFolder, self).visit()\n\n        screenDatabase = self.owner\n        datas = []\n        for photo in self.item.photos:\n            datas.append(photo.data_item(screenDatabase))\n\n        screenDatabase.data = datas\n        screenDatabase.update_can_browse()\n        screenDatabase.update_selected()\n\n\n","sub_path":"TreeViewItem/TreeViewItemFolder.py","file_name":"TreeViewItemFolder.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"155825338","text":"#!/usr/bin/env python\nimport argparse\nimport multiprocessing\nimport os\nimport subprocess\nfrom typing import List\nimport sys\nimport time\nfrom aido_utils import get_device_list, show_status\n\n\ndef copy_bags_device(device: str, OUTPUT_DIR: str, starting_time, ending_time):\n\n    # print(device)\n    # print(starting_time)\n    # print(ending_time)\n    cmd = \"docker -H %s.local rm -f clipper || echo bla && docker -H %s.local run --rm -dit --net host \\\n        --name clipper -v /data/logs:/data/logs duckietown/rpi-duckiebot-base:master19 \\\n        /bin/bash -c \\\"cd /data/logs; \\\n        source /home/software/catkin_ws/devel/setup.bash; \\\n        for file in *; do if [[ \\$file != *\\\"clipped\\\"* && \\$file != *\\\".orig.\\\"* ]]; then \\\n        if [ ! -f clipped_\\$file ]; then rosbag reindex \\$file; fi; fi; done; \\\n        for file in *; do if [[ \\$file != *\\\"clipped\\\"* && \\$file != *\\\".orig.\\\"* ]]; then \\\n        rosbag info \\$file || sudo unlink *.orig.* || echo bla && rosbag info \\$file || rosbag reindex \\$file ; fi; done; \\\n        for file in *; do if [[ \\$file != *\\\"clipped\\\"* && \\$file != *\\\".orig.\\\"* ]]; then \\\n        rosbag info \\$file || sudo unlink *.orig.* || echo bla && rosbag info \\$file || rosbag reindex \\$file ; fi; done; \\\n        for file in *; do if [[ \\$file != *\\\"clipped\\\"* && \\$file != *\\\".orig.\\\"* ]]; then \\\n        sudo mv \\$file \\${file%%.active} || echo bla && sudo unlink *.orig.* || echo blabla; fi; done; \\\n        for file in *; do if [[ \\$file != *\\\"clipped\\\"* && \\$file != *\\\".orig.\\\"* ]]; then \\\n        rosbag info \\$file && rosbag filter \\$file clipped_\\$file \\' %f <= t.to_sec() <= %f \\' && sudo rm -f \\$file; fi; done \\\" \" % (device, device, starting_time, ending_time)\n    print(cmd)\n    try:\n        error = subprocess.check_output(\n            cmd, shell=True, stderr=subprocess.STDOUT)\n    except subprocess.CalledProcessError as error:\n        return \"Error while attempting to clip bags: %s\" % error.output.decode(\"utf-8\")\n\n    time.sleep(2)\n\n    try:\n        while True:\n            cmd = 'docker -H %s.local inspect -f \\'{{.State.Running}}\\' clipper' % device\n\n            res = subprocess.check_output(\n                cmd, shell=True, stderr=subprocess.STDOUT)\n            if res.decode(\"utf-8\") == \"false\\n\":\n                break\n            time.sleep(0.5)\n    except Exception as e:\n        res = e.output.decode(\"utf-8\")\n        if \"No such object\" in res:\n            pass\n        else:\n            print(res)\n    # cmd = 'docker -H %s.local stop clipper' % device\n    # try:\n    #     error = subprocess.check_output(\n    #         cmd, shell=True, stderr=subprocess.STDOUT)\n    # except subprocess.CalledProcessError:\n    #     return \"Error while attempting to remove clipper\"\n\n    # cmd = 'ssh -q %s \"ls -t /data/logs | head -1\"' % device\n\n    # try:\n    #     filename = subprocess.check_output(\n    #         cmd, shell=True, stderr=subprocess.STDOUT)\n    #     filename = filename.rstrip().decode(\"utf-8\")\n    #     if filename == \"\":\n    #         return \"No files\"\n    # except subprocess.CalledProcessError:\n    #     return \"SSH Error\"\n\n    # fn = os.path.join(OUTPUT_DIR, filename)\n\n    # cmd = 'ssh %s \"md5sum /data/logs/%s\"' % (device, filename)\n\n    # try:\n    #     md5_before_copy = subprocess.check_output(cmd, shell=True)\n    #     md5_before_copy = (md5_before_copy.rstrip().decode(\"utf-8\")).split()[0]\n    # except subprocess.CalledProcessError:\n    #     return \"MD5 error - agent\"\n\n    # check if file already exists\n\n    # cmd = 'if [ -f %s ]; then echo \"Exists\"; else echo \"No\"; fi' % (\n    #     os.path.splitext(fn)[0])\n    # try:\n    #     res = subprocess.check_output(cmd, shell=True)\n    #     res = res.rstrip().decode(\"utf-8\")\n\n    #     if res == \"Exists\":\n    #         return \"Already exists\"\n    # except subprocess.CalledProcessError:\n    #     return \"Error\"\n\n    cmd = 'rsync -avz --block-size=131072 --protocol=29 --partial-dir=.rsync-partial %s:/data/logs/ %s/' % (\n        device, OUTPUT_DIR)\n\n    try:\n        subprocess.check_output(cmd, shell=True, stderr=subprocess.PIPE)\n    except subprocess.CalledProcessError as e:\n        return \"Copy failed : %s\" % e.output.decode(\"utf-8\")\n\n    # cmd = 'md5sum %s' % fn\n\n    # try:\n    #     md5_after_copy = subprocess.check_output(cmd, shell=True)\n    #     md5_after_copy = md5_after_copy.rstrip().decode(\"utf-8\").split()[0]\n    # except subprocess.CalledProcessError:\n    #     return \"MD5 error - server\"\n\n    # if md5_after_copy == md5_before_copy:\n    #     cmd = 'rosbag reindex %s' % fn\n    #     try:\n    #         subprocess.check_output(cmd, shell=True)\n    #     except subprocess.CalledProcessError:\n    #         os.unlink('%s' % fn)\n    #         return \"Reindex error\"\n\n    #     fn_root = os.path.splitext(fn)[0]\n    #     os.rename(fn, fn_root)\n    #     os.unlink('%s.orig.active' % fn_root)\n    #     return \"MD5 matches\"\n    # else:\n    #     os.unlink(fn)\n    #     return \"MD5 mismatch\"\n\n    return \"success - not checking MD5\"\n\n\ndef list_files(OUTPUT_DIR):\n\n    # This messes up the terminal\n    # cmd = \"watch -n1 -d \\\"ls -Al %s | awk '{print \\$9, \\$5}'\\\"\" % OUTPUT_DIR\n    # res = subprocess.Popen(\"exec \" + cmd, shell=True,\n    #                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n    # for line in iter(lambda: res.stdout.read(1), ''):\n    #     st = line.decode('utf-8')\n    #     sys.stdout.write(st)\n    #     sys.stdout.flush()\n\n    # res.terminate()\n    OUTPUT_DIR = os.path.abspath(OUTPUT_DIR)\n    # print(OUTPUT_DIR)\n    while True:\n        files = os.listdir(OUTPUT_DIR)\n        string = \"-------------------------------------------------------\\n\"\n        # print(files)\n        for f in sorted(files):\n            if (os.path.isfile(os.path.join(OUTPUT_DIR, f))):\n                f_size = os.path.getsize(os.path.join(OUTPUT_DIR, f))\n                string += \"%s : %9.3f MBs\\n\" % (f,\n                                                float(f_size)/(1024.0*1024.0))\n        sys.stdout.write(string)\n        sys.stdout.flush()\n        time.sleep(1)\n\n    # os.system(\n    #     \"watch -n1 -d \\\"ls -Al %s | awk '{print \\$9, \\$5}'\\\"\" % OUTPUT_DIR)\n\n\ndef copy_bags_all_devices(device_list: List[str], OUTPUT_DIR, starting_time, ending_time):\n\n    p = multiprocessing.Process(target=list_files, args=(OUTPUT_DIR,))\n    p.start()\n\n    pool = multiprocessing.Pool(processes=20)\n    results = pool.starmap(\n        copy_bags_device, [(_, OUTPUT_DIR, starting_time, ending_time) for _ in device_list])\n\n    pool.close()\n    pool.join()\n    time.sleep(5)\n    p.terminate()\n    p.join()\n\n    print()\n    show_status(device_list, results)\n\n\ndef copy_bags_main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--outdir', help='output directory location')\n\n    parser.add_argument('starting_time', type=float,\n                        help='starting timestamp')\n    parser.add_argument('ending_time', type=float,\n                        help='ending timestamp')\n    args = parser.parse_args()\n\n    OUTPUT_DIR = args.outdir\n\n    if OUTPUT_DIR == None:\n        OUTPUT_DIR = 'bags/'\n\n    if not os.path.exists(OUTPUT_DIR):\n        os.makedirs(OUTPUT_DIR)\n\n    device_list = get_device_list('device_list.txt')\n\n    print('Copying bags:')\n    copy_bags_all_devices(device_list, OUTPUT_DIR,\n                          args.starting_time, args.ending_time)\n\n\nif __name__ == '__main__':\n    copy_bags_main()\n","sub_path":"aido-scripts/copy_bags.py","file_name":"copy_bags.py","file_ext":"py","file_size_in_byte":7418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"481057175","text":"# Copyright cozybit, Inc 2010-2012\n# All rights reserved\n\nfrom wtf.util import *\nimport wtf.node as node\nimport sys\nerr = sys.stderr\n\n\nclass SnifferBase(node.NodeBase):\n\n    \"\"\"\n    Sniffer STA\n\n    This represents a platform-independent monitor STA that should be used by tests.\n\n    Real Sniffer STAs should extend this class and implement the actual AP functions.\n    \"\"\"\n\n    def __init__(self, comm):\n        \"\"\"\n        Create sniffer STA with the supplied default configuration.\n        \"\"\"\n        node.NodeBase.__init__(self, comm=comm)\n\n\nclass SnifferConf():\n\n    def __init__(self, channel=1, htmode=\"\", iface=None):\n        self.channel = channel\n        self.htmode = htmode\n        self.iface = iface\n\n\nclass SnifferSTA(node.LinuxNode, SnifferBase):\n\n    def __init__(self, comm, ifaces):\n        node.LinuxNode.__init__(self, comm, ifaces)\n\n    def start(self):\n        for iface in self.iface:\n            self._cmd_or_die(\"iw \" + iface.name + \" set type monitor\")\n            self._cmd_or_die(\"iw \" + iface.name + \" set monitor control\")\n            self._cmd_or_die(\"ifconfig \" + iface.name + \" up\")\n            self._cmd_or_die(\"iw \" + iface.name + \" set channel \" + str(iface.conf.channel) +\n                             \" \" + iface.conf.htmode)\n            iface.cap = CapData(monif=iface.name, promisc=True)\n\n    def stop(self):\n        node.LinuxNode.stop(self)\n","sub_path":"wtf/wtf/node/sniffer.py","file_name":"sniffer.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"27218912","text":"#coding=utf-8\n\n\"\"\"\nDjango settings for untitled2 project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'l&1a_8s-nl7clxq^kmocekfv=0t%#$j%pfp$uo*-6=h4lcaba0'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n    'suit',\n    'suit_ckeditor',\n\n    'django.contrib.admin',\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    #'django.contrib.sites',\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n\n    'debug_toolbar',\n    'bootstrap3',\n    'mptt',\n\n    'apps.slide',\n    'apps.root',\n    'apps.menu',\n    'apps.static',\n    'apps.comment',\n\n    'apps.utils.img',\n    'apps.utils.setting',\n)\n\nMIDDLEWARE_CLASSES = (\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\nMIDDLEWARE_CLASSES += (\n    # Моя вставка\n    'django.middleware.http.ConditionalGetMiddleware',\n    'django.middleware.gzip.GZipMiddleware',\n)\nMIDDLEWARE_CLASSES += (\n    'compat.midlewareHTMLCompress.SpacelessMiddleware',\n)\n\nMIDDLEWARE_CLASSES += (\n    'debug_toolbar.middleware.DebugToolbarMiddleware',\n)\n\n\nfrom django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n    'django.core.context_processors.csrf',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS += TCP + (\n    'django.core.context_processors.request',\n    # 'django.core.context_processors.auth',\n    'django.contrib.messages.context_processors.messages',\n    # Мой context processor\n    'proj.context_processor.context',\n    # 'compressor',\n)\n\n\nROOT_URLCONF = 'proj.urls'\n\nWSGI_APPLICATION = 'proj.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\nfrom os.path import abspath, dirname, join, isfile\nPROJECT_PATH = abspath(dirname(__name__, ), )\npath = lambda base: abspath(\n    join(\n        PROJECT_PATH, base,\n    ).replace('\\\\', '/')\n)\nSERVER = isfile(path('flags/server.key', ), )\nif not SERVER:\n    DATABASES = {\n        'default': {\n            'ENGINE': 'django.db.backends.sqlite3',\n            'NAME': os.path.join(BASE_DIR, 'db/db.sqlite3', ),\n        }\n    }\nelif isfile(path('flags/MySQLdb.key', ), ):\n    DATABASES = {\n        'default': {\n            'ENGINE': 'django.db.backends.mysql',  # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n            'NAME': 'bounty-tour_ua',  # Or path to database file if using sqlite3.\n            'USER': 'bounty-tour_ua',  # Not used with sqlite3.\n            'PASSWORD': '5tySHBdHeh8f3uRr',  # Not used with sqlite3.\n            'HOST': '192.168.1.90',  # Set to empty string for localhost. Not used with sqlite3.\n            'PORT': '3306',  # Set to empty string for default. Not used with sqlite3.\n        }\n    }\nelif isfile(path('flags/pgSQLdb.key', ), ):\n    DATABASES = {\n        'default': {\n            'ENGINE': 'django.db.backends.postgresql_psycopg2',  # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n            'NAME': 'bounty-tour_com_ua',  # Or path to database file if using sqlite3.\n            'USER': 'bounty-tour_com_ua',  # Not used with sqlite3.\n            'PASSWORD': '5ZqUcJdWzJbsc6pP',  # Not used with sqlite3.\n            'HOST': '192.168.1.12',  # Set to empty string for localhost. Not used with sqlite3.\n            'PORT': '5432',  # Set to empty string for default. Not used with sqlite3.\n        }\n    }\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'ru-ru'\n\nTIME_ZONE = 'Europe/Kiev'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/media-files/\n\nSTATIC_URL = '/static/'\nMEDIA_URL = '/media/'\nif not SERVER:\n    MEDIA_ROOT = path('media', )\nelse:\n    MEDIA_ROOT = path('../../media/bounty-tour_com_ua', )\n\nTEMPLATE_DIRS = (\n    os.path.join(BASE_DIR, 'templates', ),\n    # os.path.join(BASE_DIR, 'VirtualEnv/lib/python2.7/site-packages/django_mptt_admin/templates', ),\n)\n\nTEMPLATE_LOADERS = (\n    ('django.template.loaders.cached.Loader',\n     (\n         'jingo.Loader',  # Jingo dependency\n         'django.template.loaders.filesystem.Loader',\n         'django.template.loaders.app_directories.Loader',\n     ),\n     ),\n)\n\nJINGO_INCLUDE_PATTERN = r'\\.jinja2.html'  # use any regular expression here\nJINGO_EXCLUDE_APPS = ('debug_toolbar', 'suit', 'suit_ckeditor', )\n\n\n# This is the default backend. Email will be sent through a SMTP server.\n\n# The value for each argument is retrieved from the matching setting if the argument is None:\n\n# host: EMAIL_HOST\nEMAIL_HOST = 'smtp.yandex.ru'\n# port: EMAIL_PORT\nEMAIL_PORT = 587\n# username: EMAIL_HOST_USER\nEMAIL_HOST_USER = 'comment@bounty-tour.com.ua'\n# password: EMAIL_HOST_PASSWORD\nEMAIL_HOST_PASSWORD = '1q2w3e4r5t'\n# use_tls: EMAIL_USE_TLS\nEMAIL_USE_TLS = True\n\n# The SMTP backend is the default configuration inherited by Django.\n# If you want to specify it explicitly, put the following in your settings:\n\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n\n","sub_path":"proj/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"378927983","text":"#!/usr/bin/env python3\n\n\"\"\"\n\n    Script to provide starfish tools\n\n\n\"\"\"\n\nimport argparse\nimport logging\n\n\nfrom starfish.tool.create_account_command import CreateAccountCommand\nfrom starfish.tool.get_command import GetCommand\nfrom starfish.tool.send_command import SendCommand\nfrom starfish.tool.tool_output import ToolOutput\nfrom starfish.tool.wait_network_command import WaitNetworkCommand\n\n\ndef main():\n\n    parser = argparse.ArgumentParser(description='Starfish Tools')\n\n    parser.add_argument(\n        '-u',\n        '--url',\n        help=f'URL of the network node',\n    )\n\n    parser.add_argument(\n        '-d',\n        '--debug',\n        action='store_true',\n        help=f'Debug mode on or off. Default: False',\n    )\n\n    parser.add_argument(\n        '-j',\n        '--json',\n        action='store_true',\n        help='Output data as JSON values'\n    )\n\n    command_parser = parser.add_subparsers(\n        title='Starfish command',\n        description='Tool command',\n        help='Tool command',\n        dest='command'\n    )\n\n    command_list = [\n        CreateAccountCommand(command_parser),\n        GetCommand(command_parser),\n        SendCommand(command_parser),\n        WaitNetworkCommand(command_parser)\n    ]\n\n    args = parser.parse_args()\n\n    if args.debug:\n        logging.basicConfig(level=logging.DEBUG)\n\n    output = ToolOutput()\n\n    for command_item in command_list:\n        if command_item.is_command(args.command):\n            command_item.execute(args, output)\n            break\n\n    output.printout(args.json)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"tools/starfish_tools.py","file_name":"starfish_tools.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"297165030","text":"from statistics import median\n\ndef main():\n    N = int(input())\n    strs = input()\n    As = [int(num) for num in strs.split()]\n    nums = [a - i for i, a in enumerate(As, 1)]\n    m = int(median(nums))\n    print(sum([abs(a - m) for a in nums]))\n\nif __name__ == '__main__':\n    main()\n","sub_path":"abc102/problem_c.py","file_name":"problem_c.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"347677146","text":"# Modify the program from the Second Dictionary challenge of lecture 56\n# to use shelves instead of dictionaries.\n#\n# Do this by creating two programs. cave_initialise.py should create the two\n# shelves (locations and vocabulary) with the appropriate keys and values.\n#\n# cave_game.py will then use the two shelves instead of dictionaries.\n# Apart from opening and closing the shelves, cave_game will need only\n# two changes to the actual code - remember that shelf keys MUST be strings!\n#\n# Just to be clear, cave_game.py will contain the code from line 45, everything\n# before that (modified to use shelves) will be in cave_initialise.py.\n\nimport shelve\n\nwith shelve.open('books') as books:\n\n    loc = 1\n    while True:\n        availableExits = \", \".join(books['locations'][loc][\"exits\"].keys())\n\n        print(books['locations'][loc][\"desc\"])\n\n        if loc == 0:\n            break\n        else:\n            allExits = books['locations'][loc][\"exits\"].copy()\n            allExits.update(books['locations'][loc][\"namedExits\"])\n\n        direction = input(\"Available exits are \" + availableExits).upper()\n        print()\n\n        # Parse the user input, using our vocabulary dictionary if necessary\n        if len(direction) > 1:  # more than 1 letter, so check vocab\n            words = direction.split()\n            for word in words:\n                if word in books['vocabulary']:  # does it contain a word we know?\n                    direction = books['vocabulary'][word]\n                    break\n\n        if direction in allExits:\n            loc = allExits[direction]\n        else:\n            print(\"You cannot go in that direction\")\n","sub_path":"shelvewithpickles/cave_game.py","file_name":"cave_game.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"402555231","text":"# Copyright 2016 - 2020  Ternaris.\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Image stream conversion nodes.\"\"\"\n\nimport marv_api as marv\nfrom marv_robotics.bag import make_deserialize, messages\nfrom marv_ros.img_tools import ImageConversionError, ImageFormatError\nfrom marv_ros.img_tools import imgmsg_to_cv2\n\n\n@marv.node()\n@marv.input('stream', default=marv.select(messages, '/kitti/camera_color_left/image_raw'))\ndef rosmsg_imgstream(stream):\n    \"\"\"Convert stream of raw ros messages into stream of deserialized messages.\"\"\"\n    deserialize = make_deserialize(stream)\n    while True:\n        msg = yield marv.pull(stream)\n        if msg is None:\n            break\n\n        rosmsg = deserialize(msg.data)\n        yield marv.push(rosmsg)\n\n\n@marv.node()\n@marv.input('stream', default=rosmsg_imgstream)\ndef imgsrc(stream):\n    \"\"\"Convert ROS sensor_msgs/Image stream into cv2 image stream.\"\"\"\n    while True:\n        rosmsg = yield marv.pull(stream)\n        if rosmsg is None:\n            break\n\n        try:\n            img = imgmsg_to_cv2(rosmsg, 'bgr8')\n        except (ImageFormatError, ImageConversionError) as err:\n            log = yield marv.get_logger()\n            log.error('could not convert image from topic %s: %s ', stream.topic, err)\n            raise marv.Abort()\n\n        yield marv.push(img)\n","sub_path":"lectures/13_MARV_Data_Analytics/site5/code/marv_tutorials/marv_tutorials/imgsrc.py","file_name":"imgsrc.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"142635441","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nimport os.path\nimport fnmatch\n\ntop = '''#!/bin/bash\n#SBATCH --nodes=1\n#SBATCH --ntasks=10\n#SBATCH --mem=40G\n#SBATCH --time=10:00:00\n#SBATCH --output=slurm.stdout\n#SBATCH -p intel\n#SBATCH --workdir=./\n\nmodule load cd-hit/4.6.4\n\ncd-hit-est -i '''\n\nmiddle = \" -o \"\n\nbottom1 = \" -c 0.8 -G 1 -n 3 -d 0 -g 1 -r 1 -T 24 -M 16000\"\nbottom2 = \" -c 0.9 -G 1 -n 5 -d 0 -g 1 -r 1 -T 24 -M 16000\"\n\nin_base = os.path.split(os.path.splitext(sys.argv[1])[0])[1]\nbase = os.path.splitext(sys.argv[1])[0]\nfull1 = top + sys.argv[1] + middle + base + \"_c80\" + bottom1\nfull2 = top + sys.argv[1] + middle + base + \"_c90\" + bottom2\n\nout_handle1 = open(\"cd-hit_\" + in_base + \"-80.sh\", \"w\")\nout_handle2 = open(\"cd-hit_\" + in_base + \"-90.sh\", \"w\")\nprint>>out_handle1, full1\nprint>>out_handle2, full2\nprint>>out_handle2, '\\n\\necho \"Done\"'\nout_handle1.close()\nout_handle2.close()\n","sub_path":"Manuscript/figure/Figure_MITE_auto_promoter/Auto_target/scripts/make_cdhit-prot_sh.py","file_name":"make_cdhit-prot_sh.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"441148482","text":"from django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\n\nclass Post(models.Model):\n    author = models.ForeignKey(settings.AUTH_USER_MODEL)\n    photo = models.ImageField(upload_to='post', blank=True)\n    my_comment = models.TextField()\n    created_date = models.DateTimeField(auto_now_add=True)\n    modified_date = models.DateTimeField(auto_now=True)\n    like_users = models.ManyToManyField(\n        settings.AUTH_USER_MODEL,\n        related_name='like_posts',\n        through='PostLike',\n    )\n    tags = models.ManyToManyField('Tag', blank=True)\n\n    def add_comment(self, user, content):\n        return self.comment_set.create(author=user, content=content)\n\n    def add_tag(self, tag_name):\n        tag, tag_created = Tag.objects.get_or_created(name=tag_name)\n        if not self.tags.filter(id=tag.id).exists():\n            self.tags.add(tag)\n\n    @property\n    def like_count(self):\n        # 자신을 like 하고있는 user 수 리턴\n        return self.like_users.count()\n\n\nclass PostLike(models.Model):\n    post = models.ForeignKey(Post)\n    user = models.ForeignKey(settings.AUTH_USER_MODEL)\n    created_date = models.DateTimeField(auto_now_add=True)\n\n\nclass Comment(models.Model):\n    post = models.ForeignKey(Post)\n    author = models.ForeignKey(settings.AUTH_USER_MODEL)\n    content = models.TextField()\n    created_date = models.DateTimeField(auto_now_add=True)\n    modified_date = models.DateTimeField(auto_now=True)\n    like_users = models.ManyToManyField(\n        settings.AUTH_USER_MODEL,\n        related_name='like_comments',\n        through='CommentLike',\n    )\n\n\nclass CommentLike(models.Model):\n    comment = models.ForeignKey(Comment)\n    user = models.ForeignKey(settings.AUTH_USER_MODEL)\n    created_date = models.DateTimeField(auto_now_add=True)\n\n\nclass Tag(models.Model):\n    name = models.CharField(max_length=50)\n\n    def __str__(self):\n        return 'Tag({})'.format(self.name)\n","sub_path":"django_app/post/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"588539141","text":"import itertools\nfrom typing import Tuple, Iterator\n\nimport cv2\nimport math\nimport numpy as np\nimport numpy.ma as ma\nfrom tqdm import tqdm\n\nfrom .pose import Pose\n\n\nclass PoseVisualizer:\n    def __init__(self, pose: Pose):\n        self.pose = pose\n\n    def _draw_frame(self, frame: ma.MaskedArray, frame_confidence: np.ndarray, img) -> np.ndarray:\n        avg_color = np.mean(img, axis=(0, 1))\n        # print(\"avg_color\", avg_color)\n\n        for person, person_confidence in zip(frame, frame_confidence):\n            c = person_confidence.tolist()\n            idx = 0\n            for component in self.pose.header.components:\n                colors = [np.array(c[::-1]) for c in component.colors]\n\n                def _point_color(p_i: int):\n                    opacity = c[p_i + idx]\n                    np_color = colors[p_i % len(component.colors)] * opacity + (1 - opacity) * avg_color\n                    return tuple([int(c) for c in np_color])\n\n                # Draw Points\n                for i in range(len(component.points)):\n                    if c[i + idx] > 0:\n                        cv2.circle(img=img, center=tuple(person[i + idx]), radius=3,\n                                   color=_point_color(i), thickness=-1)\n\n                if self.pose.header.is_bbox:\n                    point1 = tuple(person[0 + idx].tolist())\n                    point2 = tuple(person[1 + idx].tolist())\n                    color = tuple(np.mean([_point_color(0), _point_color(1)], axis=0))\n\n                    cv2.rectangle(img=img, pt1=point1, pt2=point2, color=color, thickness=2)\n                else:\n                    int_person = person.astype(np.int32)\n                    # Draw Limbs\n                    for (p1, p2) in component.limbs:\n                        if c[p1 + idx] > 0 and c[p2 + idx] > 0:\n                            point1 = tuple(int_person[p1 + idx].tolist())\n                            point2 = tuple(int_person[p2 + idx].tolist())\n\n                            length = ((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2) ** 0.5\n\n                            color = tuple(np.mean([_point_color(p1), _point_color(p2)], axis=0))\n\n                            deg = math.degrees(math.atan2(point1[1] - point2[1], point1[0] - point2[0]))\n                            polygon = cv2.ellipse2Poly(\n                                (int((point1[0] + point2[0]) / 2), int((point1[1] + point2[1]) / 2)),\n                                (int(length / 2), 3),\n                                int(deg),\n                                0, 360, 1)\n                            cv2.fillConvexPoly(img=img, points=polygon, color=color)\n\n                idx += len(component.points)\n\n        return img\n\n    def draw(self, background_color: Tuple[int, int, int] = (255, 255, 255), max_frames: int = None):\n        int_data = np.array(np.around(self.pose.body.data.data), dtype=\"int32\")\n        for frame, confidence in itertools.islice(zip(int_data, self.pose.body.confidence), max_frames):\n            background = np.full((self.pose.header.dimensions.height, self.pose.header.dimensions.width, 3),\n                                 fill_value=background_color,\n                                 dtype=\"uint8\")\n            yield self._draw_frame(frame, confidence, img=background)\n\n    def draw_on_video(self, background_video: str, max_frames: int = None, blur=False):\n        int_data = np.array(np.around(self.pose.body.data.data), dtype=\"int32\")\n\n        if max_frames is None:\n            max_frames = len(int_data)\n\n        cap = cv2.VideoCapture(background_video)\n        for frame, confidence in itertools.islice(zip(int_data, self.pose.body.confidence), max_frames):\n            _, background = cap.read()\n            background = cv2.resize(background, (self.pose.header.dimensions.width, self.pose.header.dimensions.height))\n\n            if blur:\n                background = cv2.blur(background, (20, 20))\n\n            yield self._draw_frame(frame, confidence, background)\n        cap.release()\n\n    def save_frame(self, f_name: str, frame: np.ndarray):\n        cv2.imwrite(f_name, frame)\n\n    def save_video(self, f_name: str, frames: Iterator):\n        image_size = (self.pose.header.dimensions.width, self.pose.header.dimensions.height)\n        out = cv2.VideoWriter(f_name, cv2.VideoWriter_fourcc(*'MP4V'), self.pose.body.fps, image_size)\n        for frame in tqdm(frames):\n            out.write(frame)\n\n        out.release()\n","sub_path":"pose_format/pose_visualizer.py","file_name":"pose_visualizer.py","file_ext":"py","file_size_in_byte":4457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"214948381","text":"from django.conf.urls import include, url\nfrom SenderNeClientAPI.views import ClientAPI , FreeClientAPI\n\n#from django.urls import path\napp_name = 'SenderNeClientAPI'\n\n\nurlpatterns =[\n\n    ##### Home ####\n    url(r'^client/$',ClientAPI.testConnection , name = 'Home'),\n\n    #----------------------------------------------------#\n    url(r'^rest-auth/', include('rest_auth.urls')),\n    #url(r'^rest-auth/registration/', include('rest_auth.registration.urls', namespace=\"rest_auth.registration\")),\n    #----------------------------------------------------#\n\n    #url(r'^ContactsManager/test/RemoveAll/$',home.test_RemoveAll , name = 'test.removeAll'),\n    #url(r'^ContactsManager/test/remove/whatsContacts/$',home.Test_RemoveWhatsContacts , name = 'test.remove.whatsContacts'),\n\n    #----------------- new client ----------------------------#\n    url(r'^be_new/$',FreeClientAPI.temp_new_client , name = 'Home.AA'),\n    url(r'^socketInfo/(?P[\\w\\-]+)/$',FreeClientAPI.get_token_tempClient , name = 'Home.BB'),\n\n\n]\n\n\n","sub_path":"SenderNeClientAPI/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"268026959","text":"from utils import *\nimport numpy as np\nimport pickle as pk\nfrom tqdm import tqdm\nfrom sklearn.metrics.pairwise import cosine_similarity\n\ntmp = \"\\\"\"\n# allow us to make the mapping between the users_items_matrix and each user\nusers_id = list()\nmovies_id = list()\n\n# make a connection to the database\nconn,cursor = open_conn(\"recomvee\")\n\n\ndef get_movie_index(mv_id):\n    \"\"\"\n    Get the movie index \n    from the movie list\n    @mv_id : the given movie id\n    \"\"\"\n    return movies_id.index(mv_id) if mv_id in movies_id else -1\n\ndef get_user_index(usr_id):\n    \"\"\"\n    Get the user index \n    from the user list\n    @usr_id : the given user id\n    \"\"\"\n    return users_id.index(usr_id) if usr_id in users_id else -1\n\ndef to_list(entry,index):\n    \"\"\"\n    this is a custom \n    list converter that\n    convert a list of tuple\n    to a list of all the second\n    element of each tuple\n    @entry : the list of tuple\n    \"\"\"\n    out = []\n    for e in entry:\n        out.append(e[index])\n    return out\n\ndef get_ids(source):\n    \"\"\"\n    This function allow to fetch\n    each movies and users\n    id from the database\n    and then store them in a list\n    @source : the data source (kaggle or allocine)\n    \"\"\"\n    usrs_id = set()\n    movs_id = set()\n    query = (\"SELECT DISTINCT user_id , movie_id FROM rating WHERE source = {}\".format(tmp + source + tmp))\n    cursor.execute(query)\n    \n    for (user_id , movie_id) in cursor:\n        usrs_id.add(user_id)\n        movs_id.add(movie_id)\n    \n    return list(usrs_id) , list(movs_id)\n\ndef get_ids_bis(source,size,field=\"movies\"):\n    \"\"\"\n    This is the second version\n    of the get_ids() function.\n    It's allow to fetch each data\n    with a desired size\n    @source : the data source (kaggle or allocine)\n    @size : the data size (users size or movies size)\n    @field : to set a desired size for user or movie only (movies or users)\n    for the field, none means that we are going to fetch all the data \n    without a desired size (users and movies)\n    \"\"\"\n    usrs_id = set()\n    movs_id = set()\n\n    queryu = (\"SELECT DISTINCT user_id FROM rating WHERE source = {}\".format(tmp + source + tmp))\n    querym = (\"SELECT DISTINCT movie_id FROM rating WHERE source = {}\".format(tmp + source + tmp))\n\n    if field == \"users\":\n        queryu = (\"SELECT DISTINCT user_id FROM rating WHERE source = {} LIMIT {}\".format(tmp + source + tmp, size))\n    \n    cursor.execute(queryu)\n    \n    for user_id in cursor:\n        usrs_id.add(user_id)\n\n    if field == \"movies\":\n        querym = (\"SELECT DISTINCT movie_id FROM rating WHERE source = {} LIMIT {}\".format(tmp + source + tmp, size))\n    \n    cursor.execute(querym)\n\n    for movie_id in cursor:\n        movs_id.add(movie_id)\n    \n    return list(usrs_id) , list(movs_id)\n\ndef get_data_size(source):\n    \"\"\"\n    Allow to get our data\n    size from the database\n    @source : the data source (kaggle or allocine)\n    \"\"\"\n    query = (\"SELECT COUNT(DISTINCT movie_id) as movie_id_counts, COUNT(DISTINCT user_id) as user_id_counts FROM rating WHERE source = {}\".format(tmp + source + tmp))\n    cursor.execute(query)\n    for (movie_id_counts , user_id_counts) in cursor:\n        return movie_id_counts , user_id_counts\n\ndef get_users_size(source):\n    \"\"\"\n    Only get the users size\n    @source : the data source (kaggle or allocine)\n    \"\"\"\n    query = (\"SELECT COUNT(DISTINCT user_id) as user_id_counts FROM rating WHERE source = {}\".format(tmp + source + tmp))\n    cursor.execute(query)\n    for user_id_counts in cursor:\n        return user_id_counts[0]\n\ndef get_items_size(source):\n    \"\"\"\n    Only get the movies size\n    @source : the data source (kaggle or allocine)\n    \"\"\"\n    query = (\"SELECT COUNT(DISTINCT movie_id) as movie_id_counts FROM rating WHERE source = {}\".format(tmp + source + tmp))\n    cursor.execute(query)\n    for movie_id_counts in cursor:\n        return movie_id_counts[0]\n\ndef create_users_items_matrix(source,size,field):\n    \"\"\"\n    This function allow to \n    create the user_items_matrix\n    @source : the data source (kaggle or allocine)\n    @size : the data size (users size or movies size)\n    @field : to set a desired size for user or movie only\n    \"\"\"\n    global users_id , movies_id\n    \n    # we get the dataset size\n    if field != \"none\" and size != 0:\n        items_size = size if field == \"movies\" else get_items_size(source)\n        users_size = size if field == \"users\" else get_users_size(source)\n        # init users id set\n        users_id , movies_id = get_ids_bis(source,size,field)\n    else:\n        items_size , users_size = get_data_size(source)\n        # init users id set\n        users_id , movies_id = get_ids(source)\n\n    # init users_items_matrix with the dataset size\n    users_items_matrix = np.zeros((users_size, items_size))\n    # init the ratings row\n    row = np.zeros(items_size)\n    # we create our rating query\n    query = (\"SELECT movie_id , user_id , rating FROM rating WHERE source = {}\".format(tmp + source + tmp))\n    # send the request to the database and then fetch the data\n    cursor.execute(query)\n    \n    # get each user id and movie id and compute the users_items_matrix ceils\n    for(movie_id , user_id , rating) in cursor:\n        uindex = get_user_index(user_id)\n        mindex = get_movie_index(movie_id)\n        users_items_matrix[uindex][mindex] = rating if uindex != -1 and mindex != -1 else 0\n\n    return users_items_matrix\n\n\ndef create_users_users_matrix(source, size, field, user_user_matrix_output_file, similarity_output_file, th=0.5):\n    \"\"\"\n    Allow to create the user user similarity matrix\n    @source : the data source (kaggle or allocine)\n    @size : the data size (users size or movies size)\n    @field : to set a desired size for user or movie only\n    @th : the default cosine similarity threshold value\n    \"\"\"\n    # we create first the users_items_matrix\n    users_items_matrix = create_users_items_matrix(source,size,field)\n    similarity = {}\n\n    # now we init our users_users_matrix with the users size\n    users_users_matrix = np.eye(len(users_id))\n    ui_vect = np.zeros((2,len(movies_id)))\n    uj_vect = np.zeros((2,len(movies_id)))\n\n    # we now create the user user matrix with cosine similarity formula\n    for ui in tqdm(users_id):\n        # get the user index first\n        i = get_user_index(ui)\n        # init the user movies vect from the users_items_matrix\n        ui_vect[0] = users_items_matrix[i]\n        # init the similarity vect for this user in order to store all user similar to him\n        sim_users = []\n        for uj in users_id:\n            # get the each other user index except the current user because he have a similarity score = 1 with himself\n            j = get_user_index(uj) if ui != uj else -1\n            # init the other user movies vector from the users_items_matrix\n            uj_vect[0] = users_items_matrix[j]\n            # compute the cosine similarity between the current user and all others users except him\n            cs = cosine_similarity(ui_vect, uj_vect)[0][0]\n            # affect this score to the users_users_matrix\n            users_users_matrix[i][j] = cs\n            # then if this score is >= th, we decide that this other user is similar to the current user by their movies\n            if cs >= th: \n                sim_users.append(uj)\n\n        #we set the similarity dict with the current user id and his list of similar user\n        similarity[ui] = sim_users\n\n    # finally we serialize both users_users_matrix and the similarity dict\n    user_user_file = open(user_user_matrix_output_file, 'wb')\n    similarity_file = open(similarity_output_file, 'wb')\n    \n    pk.dump(users_users_matrix, user_user_file)\n    pk.dump(similarity, similarity_file)\n    \n    user_user_file.close()\n    similarity_file.close()\n\nsource = 'kaggle'\n\nif source == 'kaggle' :\n    user_user_matrix_output_file = \"user_user_kaggle.pkl\"\n    similarity_output_file = \"similarity_kaggle.pkl\"\nelif source == 'allocine' : \n    user_user_matrix_output_file = \"user_user_allocine.pkl\"\n    similarity_output_file = \"similarity_allocine.pkl\"\n    \ncreate_users_users_matrix(source, 0, \"none\", user_user_matrix_output_file, similarity_output_file)","sub_path":"recommendation_algorithms/user_user_matrix_creation.py","file_name":"user_user_matrix_creation.py","file_ext":"py","file_size_in_byte":8180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"327480989","text":"import numpy as np\nimport math\nimport testparser\n\n\n# Return R\n# buzai_angle is angle of buzai on roof from x-axis\n# . or ' are point of cut-end\n# ==. is 0\n# '== is 180\n# ref No6 how to calc axis\n# TODO: angle = 0, 90, 180, 270, then return const\ndef get_rotate(buzai_angle):\n    buzai_angle = buzai_angle % 360\n    if buzai_angle == 0:\n        return np.array([[0, 1, 0],\n                         [0, 0, 1],\n                         [-1, 0, 0]])\n    elif buzai_angle == 90:\n        return np.array([[-1, 0, 0],\n                         [0, 0, 1],\n                         [0, -1, 0]])\n    elif buzai_angle == 180:\n        return np.array([[0, -1, 0],\n                         [0, 0, 1],\n                         [1, 0, 0]])\n    elif buzai_angle == 270:\n        return np.array([[1, 0, 0],\n                         [0, 0, 1],\n                         [0, 1, 0]])\n\n    angle = math.radians(buzai_angle)\n\n    def calc_x(angle):\n        '''new x = x cos(theta + pi/2) - y sin(theta + pi/2)\nnew y = x sin(theta + pi/2) + y cos(theta + pi/2)\nnew z = z\n\ntips\ncos(theta + pi/2) == - sin(theta)\nsin(theta + pi/2) == cos(theta)\n'''\n        return [- math.sin(angle),\n                math.cos(angle),\n                0]\n\n    def calc_y(angle):\n        return [0., 0., 1.]\n\n    def calc_z(angle):\n        return [- math.cos(angle),\n                - math.sin(angle),\n                0]\n\n    return np.array([calc_x(angle),\n                     calc_y(angle),\n                     calc_z(angle)])\n\n\ndef get_translate(R):\n    t = np.array([0, 0, 0])\n    return np.r_[np.c_[R, t], np.array([[0, 0, 0, 1]])]\n\n\n# A is translate mat, here this is mean get_translate(R)\n# p is point that you want to translate\ndef translate_from(A, p):\n    return np.dot(A, np.r_[p, np.array([1])])[0:3]\n\n# 3x3 ver\n\n\ndef translate(A, p):\n    return np.dot(A, p)\n\n\n# if plane is represented ax + by + cz + d = 0, return (a, b, c, d)\n# connection needs p-q-r-s-p\n# in fact, needs 3 points not 4.\n# test\n# p = (0, 0, 1), q = (0, 1, 0), r = (1, -1, 0), optionaly s = (2, -2, -1)\n# must return (-4, -2, -2, -2) or something like (2, 1, 1, 1)\ndef calc_plane_param(p, q, r, s):\n    vert_vec = np.cross(q - p,\n                        s - p)\n    return (vert_vec[0],\n            vert_vec[1],\n            vert_vec[2],\n            r.dot(vert_vec))\n\n\n# return degrees\ndef get_senkai(abc):\n    b = abc[1]\n    c = abc[2]\n\n    # if c == 0, this means plain is wrong.\n    return math.atan(- b / c)\n\n\ndef get_keisya(abc):\n    a = abc[0]\n    c = abc[2]\n\n    # if c == 0, this means plain is wrong\n    return math.atan(- a / c)\n\n\n# trans_mat means translation matrix, usually ret-val of get_rotate\n# senkai_rad means ret-val of get_senkai\ndef get_trans_mat_from_senkai(trans_mat, senkai_rad):\n    ret = np.zeros((3, 3))\n    ret = trans_mat\n\n    y_axis = trans_mat[1]\n    y_mut = np.array([[1, 0, 0],\n                      [0, math.cos(senkai_rad), - math.sin(senkai_rad)],\n                      [0, math.sin(senkai_rad), math.cos(senkai_rad)]])\n    ret[1] = np.dot(y_mut, y_axis)\n\n    # ret[1] = np.array((- math.cos(senkai_rad), 0, math.sin(senkai_rad)))\n    # print(np.dot(y_mut, y_axis))\n\n    ret = np.dot(y_mut, ret)\n\n    # print(ret)\n\n    return ret\n\n\ndef get_vert(a, b, c, d):\n    return calc_plane_param(a, b, c, d)[0:3]\n\n\ndef calc_angle(trans_mat, a, b, c, d, angle_getter):\n    return angle_getter(get_vert(trans_mat, a, b, c, d))\n\n\ndef get_angle_vecs(vec1, vec2):\n    len1 = np.linalg.norm(vec1)\n    len2 = np.linalg.norm(vec2)\n    cos = np.dot(vec1, vec2) / (len1 * len2)\n\n    if cos > 1:\n        if (cos - 1) > 0.0000001:\n            print(\"Error! get_angle_vecs: {}\".format(cos))\n            exit(1)\n        else:\n            cos = 1\n\n    return math.acos(cos)\n\n\ndef calc_buzai_angle_new(buzai_angle, a, b, c, d):\n    trans_mat = get_rotate(buzai_angle)\n\n    [new_a, new_b, new_c, new_d] = [\n        translate(trans_mat, p) for p in (a, b, c, d)]\n\n    plane = get_vert(new_a, new_b, new_c, new_d)\n    senkai_rad = get_senkai(plane)\n\n    plane = np.array(plane)\n    vert = np.array((0, plane[1], plane[2]))\n    keisya_rad = get_angle_vecs(vert, plane)\n\n    return (math.degrees(senkai_rad), math.degrees(keisya_rad))\n\n\ndef easy_test(a, b, c, d):\n    (senkai, keisya) = calc_buzai_angle_new(0, a, b, c, d)\n    print(\"旋回: \" + str(senkai))\n    print(\"傾斜: \" + str(keisya))\n\n\ndef exec_test(a, b, c, d, buzai_angle):\n    (senkai, keisya) = calc_buzai_angle_new(buzai_angle, a, b, c, d)\n    return (senkai, keisya)\n\n\ndef print_test(param, is_print=False):\n    (a, b, c, d, buzai_angle, correct_senkai, correct_keisya) = param\n    senkai, keisya = exec_test(a, b, c, d, buzai_angle)\n\n    if is_print or senkai != correct_senkai or keisya != correct_keisya:\n        print('------------------------------')\n        print('a = {}'.format(a))\n        print('b = {}'.format(b))\n        print('c = {}'.format(c))\n        print('d = {}'.format(d))\n        print('angle = {}'.format(buzai_angle))\n        print('correct senkai = {}'.format(correct_senkai))\n        print('senkai = {}'.format(senkai))\n        print('correct keisya = {}'.format(correct_keisya))\n        print('keisya = {}'.format(keisya))\n        print('------------------------------')\n\n    return (senkai, keisya)\n\n\nif __name__ == '__main__':\n    import sys\n    if len(sys.argv) < 2:\n        a = np.array((3, 3, 2))\n        b = np.array((5, 3, 1))\n        c = np.array((5, 2, 2))\n        d = np.array((3, 2, 3))\n\n        easy_test(a, b, c, d)\n    else:\n        testfile = sys.argv[1]\n        tests = testparser.parse_tests(testfile)\n        for case in tests:\n            print_test(case, True)\n","sub_path":"calcangle.py","file_name":"calcangle.py","file_ext":"py","file_size_in_byte":5613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"590499273","text":"from random import randint\r\n\r\ndef get_binary_string(binary):\r\n    binary = format(binary, '0b')\r\n    return '0'*(9-len(binary))+binary\r\n\r\ndef print_scoreboard(table, mask):\r\n    row, mask, table = \"\", get_binary_string(mask), get_binary_string(table)\r\n    \r\n    for i, v in enumerate(table):\r\n        row += '-' if mask[i]=='0'else ('X' if v == '1' else 'O')\r\n        \r\n        if (i+1)% 3 == 0:\r\n            print(row)\r\n            row = \"\"\r\n            \r\ndef get_random_index(l):\r\n    p = randint(0, len(l)-1)\r\n    return l[p], l[:p]+l[p+1:]\r\n\r\ndef winner(table, a):\r\n    masks = [0b111000000, 0b000111000, 0b000000111,\r\n             0b100100100, 0b010010010, 0b001001001,\r\n             0b100010001, 0b001010100]\r\n    \r\n    for m in masks:\r\n        if a & (table & m == m):\r\n            return 1\r\n        if ~a & ((~table & 0b111111111) & m == m):\r\n            return 0\r\n    return -1\r\n\r\ndef simulation_tic_tac_toe(M, debug=True):\r\n    prob = dict()\r\n    prob.update({'X': 0, 'O': 0, 'DRAW': 0})\r\n    \r\n    for i in range(M):\r\n        base, mask, number, actual = list(range(9)), 0, 0, 1 \r\n        \r\n        while len(base) > 0:\r\n            p, base = get_random_index(base)\r\n            mask |= 1 << p\r\n            number = number | 1 << p if actual else number\r\n            update = winner(number & mask if actual else number | (~mask & 0b111111111), actual)\r\n            \r\n            if debug:\r\n                print_scoreboard(number, mask)\r\n                print(\"\")\r\n            \r\n            if update == 1:\r\n                if debug: \r\n                    print(\"X WINS!\")\r\n                prob['X'] += 1\r\n                break\r\n            \r\n            if update == 0:\r\n                if debug:\r\n                    print(\"O WINS!\")\r\n                prob['O'] += 1\r\n                break\r\n            \r\n            actual = int(not(actual))\r\n        \r\n        if winner(number & mask if actual else number | (~mask & 0b111111111), actual) == -1:\r\n            prob['DRAW'] += 1\r\n            if debug:\r\n                print(\"DRAW\")\r\n    return prob\r\n\r\nM = 2\r\nprobs = simulation_tic_tac_toe(M, debug = True)\r\n\r\nfor k, prob in probs.items():\r\n    print(\"P(Y=\"+str(k)+\")=\"+str(prob)+\"/\"+str(M)+\"=\"+\"%.2f\"%(1.0*prob/M))","sub_path":"bool_tic_tac_toe.py","file_name":"bool_tic_tac_toe.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"485965792","text":"from psycopg2 import connect\nimport requests\nimport json\n\ndef extract_sqlmetadata(cid): \n    # Get container info \n    r = requests.get(\n\t\"http://fda-container-managing-service:9091/api/container\",\n        params = {\"id\":cid}\n    )\n    \n    # ContainerID \n    #cid = r.json()['id']\n\n    # Connecting to database \n    conn=connect(\n        dbname = r.json()['internalName'], \n        user = \"postgres\",\n        host = r.json()['adresses'][0]['ipv4'],\n        password = \"postgres\"\n    )\n\n    cursor = conn.cursor() \n\n    # Extract database title\n    cursor.execute(f\"\"\"Select table_catalog \n                   from information_schema.tables;\"\"\"\n    )\n    dbtitle = cursor.fetchone()\n\n    # Extract table names, number of tables \n    cursor.execute(f\"\"\"SELECT columns.table_name, count(columns.column_name)\n                   FROM information_schema.columns \n                   WHERE table_schema='public'\n                   GROUP BY columns.table_name;\"\"\"\n    )\n    res = cursor.fetchall()\n    # Merge table names with containerid for inserting tuples \n    tblnames=[]\n    for item in res:\n        tblnames.append((cid,item[0],item[1]))\n    print(tblnames)\n\n    # Extract column information \n    cursor.execute(f\"\"\"SELECT columns.table_name, columns.column_name, columns.data_type \n                   FROM information_schema.columns \n                   WHERE table_schema='public';\"\"\"\n    )\n    res2 = cursor.fetchall() \n    # Merge column information with containerid for inserting tuples \n    colnames=[]\n    for item in res2:\n        colnames.append((cid,item[0],item[1], item[2]))\n    print(colnames)\n\n    conn.close()\n\n    # Connect to Meta database \n    r = requests.get(\n        \"http://localhost:9091/api/getDatabaseContainerByContainerID\", \n        params = {\"containerID\":\"Metadatabase\"}\n    )\n\n    r.json()\n\n    conn=connect(\n        dbname=\"fda\", \n        user = \"postgres\",\n        host = \"fda-metadata-db\", \n        password = \"postgres\"\n    )\n\n    cursor = conn.cursor() \n\n    # Insert into Table DATABASES \n    cursor.execute(\"Insert into DATABASES (DBID,Title) values (%s,%s) ON CONFLICT (DBID) DO UPDATE SET Title=EXCLUDED.Title;\", (cid, dbtitle))\n    cursor.execute(\"Select * from Databases;\")\n    for i, record in enumerate(cursor): \n        print( record )\n    conn.commit()\n\n    # Prepare and insert tblnames into table TABLES \n    records_list_template = ','.join(['%s'] * len(tblnames))\n    insert_tblnames = 'Insert into TABLES (tDBID,tName,NumCols) values {} ON Conflict do nothing'.format(records_list_template)\n    cursor.execute(insert_tblnames,tblnames)\n\n    cursor.execute(\"Select * from Tables;\")\n    for i, record in enumerate(cursor): \n        print( record )\n    conn.commit()\n\n    # Prepare and insert into table COLUMNS \n    records_list_template = ','.join(['%s'] * len(colnames))\n    insert_colnames = 'Insert into COLUMNS (cDBID,tName,cName,Datatype) values {} ON Conflict do nothing'.format(records_list_template)\n    cursor.execute(insert_colnames,colnames)\n\n    cursor.execute(\"Select * from Columns;\")\n    for i, record in enumerate(cursor): \n        print( record )\n    conn.commit()\n\n    conn.close()\n\n","sub_path":"fda-analyse-service/extract_sqlmetadata.py","file_name":"extract_sqlmetadata.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"519122930","text":"from utils.helpers import magic_combine\nfrom models import BaseStatModel\nimport numpy as np\nimport pytorch_lightning as pl\n\nimport torch\nimport torch.nn as nn\n\nfrom nnAudio import Spectrogram\nfrom utils.activation import CustomELU\nfrom utils.layer import Unsqueeze\n\n\nclass C2DConvLSTMStat_V1(BaseStatModel):\n\n    STFT_HIDDEN_SIZE = \"stft_hidden_size\"\n    STFT_NUM_LAYERS = \"stft_num_layers\"\n\n    MEL_SPEC_HIDDEN_SIZE = \"mel_spec_hidden_size\"\n    MEL_SPEC_NUM_LAYERS = \"mel_spec_num_layers\"\n\n    MFCC_HIDDEN_SIZE = \"mfcc_hidden_size\"\n    MFCC_NUM_LAYERS = \"mfcc_num_layers\"\n\n    N_FFT = \"n_fft\"\n    N_MELS = \"n_mels\"\n    N_MFCC = \"n_mfcc\"\n    SPEC_TRAINABLE = \"spec_trainable\"\n\n    def __init__(self,\n                 batch_size=32,\n                 num_workers=4,\n                 train_ds=None,\n                 val_ds=None,\n                 test_ds=None,\n                 **model_config):\n        super().__init__(batch_size, num_workers, train_ds, val_ds, test_ds, **model_config)\n\n        self.__build_model()\n    \n    def __build_model(self):\n\n        f_bins = (self.config[self.N_FFT] // 2) + 1\n\n        self.stft = Spectrogram.STFT(n_fft=self.config[self.N_FFT], fmax=9000, sr=22050, trainable=self.config[self.SPEC_TRAINABLE], output_format=\"Magnitude\")\n        self.mel_spec = Spectrogram.MelSpectrogram(sr=22050, n_fft=self.config[self.N_FFT], n_mels=self.config[self.N_MELS], trainable_mel=self.config[self.SPEC_TRAINABLE], trainable_STFT=self.config[self.SPEC_TRAINABLE])\n        self.mfcc = Spectrogram.MFCC(sr=22050, n_mfcc=self.config[self.N_MFCC])\n\n        self.stft_feature_extractor = nn.Sequential(\n            Unsqueeze(1),\n\n            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=(3, 3), stride=(1, 1)),\n            nn.BatchNorm2d(16),\n            nn.AvgPool2d(kernel_size=(3, 3), stride=(2, 2)),\n            nn.Dropout2d(self.config[self.DROPOUT]),\n            nn.ReLU(),\n\n            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(3, 3), stride=(1, 1)),\n            nn.BatchNorm2d(32),\n            nn.AvgPool2d(kernel_size=(3, 3), stride=(2, 2)),\n            nn.Dropout2d(self.config[self.DROPOUT]),\n            nn.ReLU(),\n\n            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), stride=(1, 1)),\n            nn.BatchNorm2d(64),\n            nn.AvgPool2d(kernel_size=(3, 3), stride=(2, 2)),\n            nn.Dropout2d(self.config[self.DROPOUT]),\n            nn.ReLU(),\n\n            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(1, 1)),\n            nn.BatchNorm2d(128),\n            nn.AvgPool2d(kernel_size=(3, 3), stride=(2, 2)),\n            nn.Dropout2d(self.config[self.DROPOUT]),\n            nn.ReLU(),\n\n            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1)),\n            nn.BatchNorm2d(128),\n            nn.AvgPool2d(kernel_size=(3, 3), stride=(2, 2)),\n            nn.Dropout2d(self.config[self.DROPOUT]),\n            nn.ReLU(),\n\n            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1)),\n            nn.BatchNorm2d(128),\n            nn.AvgPool2d(kernel_size=(3, 3), stride=(2, 2)),\n            nn.Dropout2d(self.config[self.DROPOUT]),\n            nn.ReLU()\n        )\n\n        self.mel_spec_feature_extractor = nn.Sequential(\n            Unsqueeze(1),\n\n            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=(3, 3), stride=(1, 1)),\n            nn.BatchNorm2d(16),\n            nn.AvgPool2d(kernel_size=(3, 3), stride=(2, 2)),\n            nn.Dropout2d(self.config[self.DROPOUT]),\n            nn.ReLU(),\n\n            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(3, 3), stride=(1, 1)),\n            nn.BatchNorm2d(32),\n            nn.AvgPool2d(kernel_size=(3, 3), stride=(2, 2)),\n            nn.Dropout2d(self.config[self.DROPOUT]),\n            nn.ReLU(),\n\n            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), stride=(1, 1)),\n            nn.BatchNorm2d(64),\n            nn.AvgPool2d(kernel_size=(3, 3), stride=(2, 2)),\n            nn.Dropout2d(self.config[self.DROPOUT]),\n            nn.ReLU(),\n\n            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(1, 1)),\n            nn.BatchNorm2d(128),\n            nn.AvgPool2d(kernel_size=(3, 3), stride=(2, 2)),\n            nn.Dropout2d(self.config[self.DROPOUT]),\n            nn.ReLU(),\n\n            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=(3, 3), stride=(1, 1)),\n            nn.BatchNorm2d(128),\n            nn.AvgPool2d(kernel_size=(3, 3), stride=(2, 2)),\n            nn.Dropout2d(self.config[self.DROPOUT]),\n            nn.ReLU()\n        )\n\n        self.mfcc_feature_extractor = nn.Sequential(\n            Unsqueeze(1),\n\n            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=(3, 3), stride=(1, 1)),\n            nn.BatchNorm2d(16),\n            nn.AvgPool2d(kernel_size=(3, 3), stride=(1, 2)),\n            nn.Dropout2d(self.config[self.DROPOUT]),\n            nn.ReLU(),\n\n            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=(3, 3), stride=(1, 1)),\n            nn.BatchNorm2d(32),\n            nn.AvgPool2d(kernel_size=(3, 3), stride=(1, 2)),\n            nn.Dropout2d(self.config[self.DROPOUT]),\n            nn.ReLU(),\n\n            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), stride=(1, 1)),\n            nn.BatchNorm2d(64),\n            nn.AvgPool2d(kernel_size=(3, 3), stride=(1, 2)),\n            nn.Dropout2d(self.config[self.DROPOUT]),\n            nn.ReLU(),\n\n            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), stride=(1, 1)),\n            nn.BatchNorm2d(128),\n            nn.AvgPool2d(kernel_size=(3, 3), stride=(2, 2)),\n            nn.Dropout2d(self.config[self.DROPOUT]),\n            nn.ReLU(),\n\n        )\n\n        self.stft_lstm = nn.LSTM(\n            input_size=128 * 5,\n            hidden_size=self.config[self.STFT_HIDDEN_SIZE],\n            num_layers=self.config[self.STFT_NUM_LAYERS]\n        )\n\n        self.mel_spec_lstm = nn.LSTM(\n            input_size=128 * 1,\n            hidden_size=self.config[self.MEL_SPEC_HIDDEN_SIZE],\n            num_layers=self.config[self.MEL_SPEC_NUM_LAYERS]\n        )\n\n        self.mfcc_lstm = nn.LSTM(\n            input_size=128 * 2,\n            hidden_size=self.config[self.MFCC_HIDDEN_SIZE],\n            num_layers=self.config[self.MFCC_NUM_LAYERS]\n        )\n\n        input_size = self.config[self.STFT_HIDDEN_SIZE]\n        input_size += self.config[self.MEL_SPEC_HIDDEN_SIZE]\n        input_size += self.config[self.MFCC_HIDDEN_SIZE]\n\n        self.fc = nn.Sequential(\n            nn.Linear(in_features=input_size, out_features=512),\n            nn.Dropout(p=self.config[self.DROPOUT]),\n            nn.ReLU(),\n            nn.Linear(in_features=512, out_features=128),\n            nn.ReLU()\n        )\n\n        self.fc_mean = nn.Sequential(\n            nn.Linear(in_features=128, out_features=2)\n        )\n\n        self.fc_std = nn.Sequential(\n            nn.Linear(in_features=128, out_features=2),\n            self._get_std_activation()\n        )\n\n    def forward(self, x):\n\n        stft_x = self.stft(x)\n        stft_x = self.stft_feature_extractor(stft_x)\n\n        mel_x = self.mel_spec(x)\n        mel_x = self.mel_spec_feature_extractor(mel_x)\n\n        mfcc_x = self.mfcc(x)\n        mfcc_x = self.mfcc_feature_extractor(mfcc_x)\n\n        stft_x = magic_combine(stft_x, 1, 3)\n        mel_x = magic_combine(mel_x, 1, 3)\n        mfcc_x = magic_combine(mfcc_x, 1, 3)\n\n        stft_x = stft_x.permute((0, 2, 1))\n        mel_x = mel_x.permute((0, 2, 1))\n        mfcc_x = mfcc_x.permute((0, 2, 1))\n\n        (out, _) = self.stft_lstm(stft_x)\n        stft_x = out[:, -1, :]\n\n        (out, _) = self.mel_spec_lstm(mel_x)\n        mel_x = out[:, -1, :]\n\n        (out, _) = self.mfcc_lstm(mfcc_x)\n        mfcc_x = out[:, -1, :]\n\n        x = torch.cat((stft_x, mel_x, mfcc_x), dim=1)\n\n        x = self.fc(x)\n        x_mean = self.fc_mean(x)\n        x_std = self.fc_std(x)\n        x = torch.cat((x_mean, x_std), dim=1)\n        return 
x\n","sub_path":"models/n2dconv_lstm/stat/c/model_v1.py","file_name":"model_v1.py","file_ext":"py","file_size_in_byte":8015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"587189842","text":"import logging\nimport numpy as np\n\nfrom ibllib.io.extractors import biased_trials\nfrom ibllib.io.extractors.base import BaseBpodTrialsExtractor\n\n_logger = logging.getLogger('ibllib')\n\n\nclass LaserBool(BaseBpodTrialsExtractor):\n \"\"\"\n Extracts the laser probabilities from the bpod jsonable\n \"\"\"\n save_names = ('_ibl_trials.laser_stimulation.npy', '_ibl_trials.laser_probability.npy')\n var_names = ('laser_stimulation', 'laser_probability')\n\n def _extract(self, **kwargs):\n lstim = np.array([np.float(t.get('laser_stimulation', np.NaN)) for t in self.bpod_trials])\n lprob = np.array([np.float(t.get('laser_probability', np.NaN)) for t in self.bpod_trials])\n _logger.info('Extracting laser datasets')\n if np.all(np.isnan(lprob)):\n # this prevents the file from being saved when no data\n self.save_names = ('_ibl_trials.laser_stimulation.npy', None)\n _logger.warning('No laser probability found in bpod data')\n if np.all(np.isnan(lstim)):\n # this prevents the file from being saved when no data\n self.save_names = (None, '_ibl_trials.laser_probability.npy')\n _logger.warning('No laser stimulation found in bpod data')\n return lstim, lprob\n\n\ndef extract_all(*args, extra_classes=None, **kwargs):\n \"\"\"\n Extracts the biased trials for a training session\n \"\"\"\n if extra_classes is not None:\n extra_classes.append(LaserBool)\n else:\n extra_classes = [LaserBool]\n return biased_trials.extract_all(*args, **kwargs, extra_classes=extra_classes)\n","sub_path":"ibllib/io/extractors/opto_trials.py","file_name":"opto_trials.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"358240685","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom .views import (\n transaction_list,\n transaction_view,\n transaction_create\n )\n\nurlpatterns = [\n url(r'^$', transaction_list),\n url(r'^view/$', transaction_view),\n url(r'^new/$', transaction_create),\n]","sub_path":"transactions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"40480904","text":"from beets.library import Library\nfrom unidecode import unidecode\nfrom subprocess import PIPE, Popen\nfrom beets.library import PathType\nfrom os import path, remove\nfrom shutil import copy2, rmtree\nfrom inspect import getfile, currentframe\nfrom os import listdir\nfrom os.path import isfile, join\nimport os\ndef reset_beets():\n remove(path.expanduser('~/.config/beets/state.pickle'))\n\n\"\"\"\n#Klasa album przechowuje ['artpath'] jako objekt PathType\n#PathType.format(path) konwertuje path do stringa\n#Funkcja napisana aby kod był przejrzystszy\n\"\"\"\ndef path_to_str(path):\n pathconverter = PathType()\n return pathconverter.format(path)\n\ndef get_str_paths(albums):\n str_paths = []\n for album in albums:\n str_paths.append(path_to_str(album.artpath))\n return str_paths\n\n\ndef get_server_path():\n serverpath = path.dirname(path.abspath(getfile(currentframe())))\n serverpath = serverpath[:-7] # takes 'python/' away\n return serverpath\n\n\n\"\"\"\nNa wejściu wprowadzadź obiekt biblioteki\n#Funkcja kopiuje okładki albumów do folderu 
./static/images,\n#a następnie nadpisuje album.artpath każdego albumu biblioteki \n#do formatu html-friendly (../static/images)\n#Zwraca liste ścieżek\n\"\"\"\ndef get_covers(albumlist):\n nonepath = '../static/images/image-not-found.jpg'\n paths = []\n imagespath = get_server_path()\n for album in albumlist:\n album_art_path = path_to_str(album.artpath)\n if path.exists(album_art_path):\n newCoverPath = \"/static/images/art\" + str(album.id) + \".jpg\"\n if path.exists(imagespath+newCoverPath):\n remove(imagespath+newCoverPath)\n copy2(album_art_path, imagespath+newCoverPath)\n paths.append('..'+newCoverPath)\n else:\n paths.append(nonepath)\n return paths\n\n\n\ndef get_library():\n \"\"\"\n #ponieważ config.yaml na różnych systemach znajduje sie w różnych miejscach,\n #lepiej wywolac beet config niz bezposrednio otwierac plik open('/home/dominik/.config/beets/config.yaml', 'r')\n \"\"\"\n p = Popen(['beet','config'], stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=1)\n\n for line in p.stdout:\n line = line.decode('UTF-8')[:-1]\n if \"directory: \" in line:\n line = line[11:]\n if not path.exists(line):\t\t\t#w przypadku gdy ścieżka podana jest z użyciem '~/'\n line = path.expanduser(line)\n return line\n\n\ndef get_database():\n \"\"\"\n #ponieważ config.yaml na różnych systemach znajduje sie w różnych miejscach,\n #lepiej wywolac beet config niz bezposrednio otwierac plik open('/home/dominik/.config/beets/config.yaml', 'r')\n \"\"\"\n p = Popen(['beet','config'], stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=1)\n for line in p.stdout:\n line = line.decode('UTF-8')[:-1]\n if \"library: \" in line:\n line = line[9:]\n if not path.exists(line):\t\t\t#w przypadku gdy ścieżka podana jest z użyciem '~/'\n line = path.expanduser(line)\n return line\n\ndef beetImport(path='.', logs=0):\n #lista id nowo dodanych albumow\n albumsId = []\n pathname=get_library()+'/*.*'\n print(pathname)\n print(\"werrrrrrrrrrrrrrrrryloooooooooooooooong\")\n \"\"\"onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]\n print(onlyfiles)\n onlydirs=[ name for name in os.listdir(path) if os.path.isdir(os.path.join(path, name)) ]\n print(onlydirs)\"\"\"\n\n p = Popen(['beet','import', path, '-A', '-P', '-i', 'c'], stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=1)\n\n \"\"\"p = Popen(['beet', 'import', path, '-Pqgs'], stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=1)\n for line in p.stdout.readlines():\n print(line)\n p = Popen(['beet', 'import',path], stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=1)\n for line in p.stdout.readlines():\n print(line)\"\"\"\n if logs == 1: print('wykonano import\\nsciezki zaimportowanych plikow:')\n albumPath = []\n for line in p.stderr:\n #zapisywanie ścieżek albumów do listy. ścieżki typu byte\n albumPath.append(line.decode('UTF-8')[:-1])\n if logs == 1: print(line.decode('UTF-8')[:-1])\n for album in albumPath:\n #wyświetla id albumu\n a = Popen(['beet', 'list', album, '-a', '-f', '$id'], stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=1)\n for id in a.stdout:\n albumsId.append(int(id.decode('UTF-8')[:-1]))\n if logs == 1: print(albumsId)\n return albumsId\n\n\n\"\"\"\nFunkcja napisana aby ułatwić HTMLowi wyświetlanie obrazu\nAlbum.artpath przechowywane jest jako obiekt klasy PathType\npack_albums() przyjmuje jako atrybut liste albumów,\nz każdego wyciąga artpath, konwertuje go do stringa i\ntworzy podlisty [album,ścieżka]. 
Zwracana jest lista podlist.\n\n#W przyszlosci rozbudowane zostanie o liste zdalnych repozytoriow\n\"\"\"\ndef pack_albums(albums):\n paths = get_covers(albums)\n albums_packed = []\n for i, album in enumerate(albums):\n pack = [album, paths[i]]\n albums_packed.append(pack)\n return albums_packed\n\n\n\"\"\"\nTo samo co pack_albums ale zwraca liste podlist [album, ścieżka, items, items_number_so_far]\n\"\"\"\ndef pack_albums_items(albums):\n paths = get_covers(albums)\n items_packed = []\n items_count = []\n for i, album in enumerate(albums):\n items = []\n for item in album.items():\n items.append(item)\n items_count.append(item)\n pack = [album, paths[i], items, len(items_count)]\n items_packed.append(pack)\n\n return items_packed","sub_path":"python/beetsCommands.py","file_name":"beetsCommands.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"444202913","text":"# The following code has been adapted from code provided in the README.rst file on the Twython GitHub page\r\n# which can be found at the following URL:\r\n# https://github.com/ryanmcgrath/twython\r\nfrom twython import Twython\r\nimport glob\r\nimport random\r\nCONSUMER_KEY = ''\r\nCONSUMER_SECRET = ''\r\nACCESS_KEY = ''\r\nACCESS_SECRET = ''\r\n\r\ntwitter = Twython(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET)\r\n\r\npicNum = 0;\r\nfh = open(\"/home/root/server/picnumber.txt\", \"r\") # open the file picnumber.txt in read only mode\r\npicNum = fh.read() # read the file into the picNum variable\r\n\r\nstr1 = \"/home/root/server/pictures/pic\"\r\nstr2 = str1+ str(picNum)\r\nstr3 = str2 + \".jpg\" # concatenate these strings together to get the name and location of the picture to post\r\n\r\nphoto = open(str3, 'rb') # open the picture and read the binary\r\nresponse = twitter.upload_media(media=photo)\r\ntwitter.update_status(status='Here\\'s a picture I just took!\"', media_ids=[response['media_id']]) # update the status with the picture\r\n","sub_path":"TweetPic.py","file_name":"TweetPic.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"52857223","text":"import sys\nimport random\n\nsys.path.append('src')\nsys.path.append('tests')\nsys.path.append('src/strategies/level_3')\nfrom game import Game\n\nfrom numbers_berserker_level_3 import NumbersBerserkerLevel3\nfrom camper_level_3 import CamperLevel3\nfrom elijah_level_3 import ElijahLevel3\n\nfrom player import Player\nfrom otest import cstring\n\nprint(\"Playing games...\")\n\ndef matchup(type1, type2):\n print(cstring(f\"\\n &5 {type1.__name__} vs {type2.__name__}\"))\n wins = [0, 0, 0]\n games = 100\n winlog = False\n for i in range(games):\n first_player = 0 if i < games//2 else 1\n random.seed(i+1)\n log = i in []\n # log = True\n game = Game((7, 7), logging=log, rendering=False, game_level=3, die_size=10, debug_mode=False)\n p1 = Player(type1(first_player), \"Player1\", (3, 0), game)\n p2 = Player(type2(1-first_player), \"Player2\", (3, 6), game)\n if first_player == 0:\n game.add_player(p1)\n game.add_player(p2)\n else:\n game.add_player(p2)\n game.add_player(p1)\n\n game.start()\n\n if game.run_until_completion(max_turns=100):\n if winlog: print(type(game.winner.strat).__name__, i)\n wins[[type1, type2].index(type(game.winner.strat))] += 1\n else:\n if winlog: print(\"tie\", i)\n wins[2] += 1\n\n if log:\n input()\n wins = [w/games for w in wins]\n return 
wins\n\n\nprint(matchup(NumbersBerserkerLevel3, ElijahLevel3))\n\n# print(matchup(CamperLevel3, ElijahLevel3))\n","sub_path":"analysis/level_3_matchups.py","file_name":"level_3_matchups.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"530278813","text":"from django.conf.urls import patterns, url\nfrom useradmin import views\n\nurlpatterns = patterns('',\n url(r'^$', views.index, name='index'),\n url(r'^login$', views.login, name='login'),\n url(r'^weibo_login$',views.weibologin, name='weibologin'),\n url(r'^register$', views.register_form, name='register_form'),\n url(r'^register/result$', views.register, name='register'),\n url(r'^logout$', views.logout, name='logout'),\n url(r'^callback$', views.callback, name='callback'),\n)\n","sub_path":"1/useradmin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"297717986","text":"import copy\nimport numpy as np\nfrom unitary import *\nimport time\n\nclass CompilerResult:\n\n def __init__(self, compiled_sequence, cost_by_step, total_elapsed_time):\n self.compiled_sequence = compiled_sequence\n self.cost_by_step = cost_by_step\n self.total_elapsed_time = total_elapsed_time\n\nclass Compiler:\n\n def __init__(self, dimension):\n assert dimension > 0\n self.dimension = dimension\n self.unitary_primitives = []\n\n def set_unitary_primitives(self, unitary_primitives):\n assert isinstance(unitary_primitives, list) or isinstance(unitary_primitives, np.ndarray)\n assert np.all([isinstance(primitive, UnitaryPrimitive) for primitive in unitary_primitives])\n assert np.all([primitive.get_unitary().get_dimension() <= self.dimension for primitive in unitary_primitives])\n\n self.unitary_primitives = copy.deepcopy(unitary_primitives)\n\n def compile(self, target_unitary, threshold=None, max_step_count=np.iinfo(np.int32).max):\n assert isinstance(target_unitary, Unitary)\n assert self.unitary_primitives\n\n initial_time = time.perf_counter()\n compiled_sequence, cost_by_step = self._compile(target_unitary, threshold, max_step_count)\n total_elapsed_time = time.perf_counter() - initial_time\n\n result = CompilerResult(compiled_sequence, cost_by_step, total_elapsed_time)\n return result\n\n def compile_layered(self, target_unitary, unitary_primitive_counts, threshold=None, max_step_count=np.iinfo(np.int32).max):\n assert isinstance(target_unitary, Unitary)\n assert isinstance(unitary_primitive_counts, dict)\n\n initial_time = time.perf_counter()\n compiled_sequence, cost_by_step = self._compile_layered(target_unitary, unitary_primitive_counts, threshold, max_step_count)\n total_elapsed_time = time.perf_counter() - initial_time\n\n result = CompilerResult(compiled_sequence, cost_by_step, total_elapsed_time)\n return result\n\n def _compile(self, target_unitary, threshold, max_step_count):\n raise NotImplementedError\n","sub_path":"paper/fig11/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"206644554","text":"import torch, torchvision \nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\n\nfrom torch.autograd import Variable \nfrom torch.utils.data import Dataset, DataLoader\n\nimport numpy as np\n\n# Device \ndevice = torch.device('cuda:0' if 
torch.cuda.is_available() else 'cpu')\n\n# Data set and Data loader \ntransform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\ntrainset = torchvision.datasets.MNIST(root = './data',\n train = True,\n download = True, \n transform = transform)\n\ntestset = torchvision.datasets.MNIST(root = './data',\n train = False, \n download = True, \n transform = transform)\n\ntrainloader = DataLoader(trainset, batch_size=8, shuffle=True, num_workers=2)\n\ntestloader = DataLoader(testset, batch_size=8, shuffle=True, num_workers=2)\n\n# hyper_parameter\nclasses = ('0','1','2','3','4','5','6','7','8','9')\nnum_epochs = 5\nnum_classes = len(classes) \nbatch_size = 100 \nlearning_rate = 0.001\n\n# My_CNN network \nclass myNet(nn.Module):\n def __init__(self):\n super(myNet,self).__init__()\n \n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 10, kernel_size=5),\n nn.MaxPool2d(kernel_size=2),\n nn.ReLU()\n )\n \n self.layer2 = nn.Sequential(\n nn.Conv2d(10,20, kernel_size=5),\n nn.Dropout2d(p=0.5),\n nn.MaxPool2d(kernel_size=2),\n nn.ReLU()\n )\n \n self.layer3 = nn.Sequential(\n nn.Linear(320,50),\n nn.ReLU(),\n nn.Dropout2d(p=0.5)\n )\n \n self.layer4 = nn.Sequential(\n nn.Linear(50,10),\n nn.LogSoftmax(dim = 1)\n )\n \n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = x.view(x.shape[0], -1)\n x = self.layer3(x)\n return self.layer4(x)\n \n# Train\nmodel = myNet().to(device)\n\nLoss = nn.CrossEntropyLoss() \nOptimizer = torch.optim.Adam(model.parameters(), lr = learning_rate)\n\ntotal_step = len(trainloader)\n\nfor epoch in range(num_epochs):\n for i, (images, labels) in enumerate(trainloader):\n images = images.to(device)\n labels = labels.to(device)\n\n # Forward pass \n outputs = model(images)\n loss = Loss(outputs, labels)\n\n # Backward and optimize\n Optimizer.zero_grad()\n loss.backward()\n Optimizer.step()\n\n if (i+1) % 1000 == 0 : \n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'. 
format(epoch, num_epochs, i+1, total_step, loss.item()))\n\n# Test \nmodel.eval()\n\nwith torch.no_grad():\n correct = 0 \n total = 0 \n for images, labels in testloader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data , 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n \n print(\"Test Accuracy of the model on the 10000 images : {} %\".format(100*correct/total))\n\n","sub_path":"mnist_example/my_mnist.py","file_name":"my_mnist.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"546589375","text":"import os\nfrom flask import Flask, flash, request, render_template, url_for, redirect, make_response\n\nimport os\nimport base64\nimport hmac\nimport hashlib\nimport json\nimport datetime\nimport uuid\n\n# import Delorean\n\n################################################################################\n\napp = Flask(__name__)\n\napp.config['AWS_ACCESS_KEY_ID'] = os.environ['AWS_ACCESS_KEY_ID']\napp.config['AWS_SECRET_ACCESS_KEY'] = os.environ['AWS_SECRET_ACCESS_KEY']\napp.config['S3_BUCKET_NAME'] = os.environ['S3_BUCKET_NAME']\napp.config['DEBUG'] = os.environ.get('FLASK_DEBUG', False)\n\napp.secret_key = os.environ.get('FLASK_SECRET_KEY', 'secret_key')\n\n################################################################################\n\n@app.route('/')\ndef hello():\n# \tflash(os.environ)\n# \tflash(request.method)\n# \tflash(request.path)\n# \tflash(request.args)\n# \tflash(request.headers)\n\treturn render_template('index.html')\n\n@app.route('/uploaded')\ndef uploaded():\n\n\ttheParameters = {\n\t\t'key': request.args['key'],\n\t\t'bucket': request.args['bucket'],\n\t\t}\n\n\tif request.headers.get('Accept') == 'application/json':\n\t\ttheResponse = make_response(json.dumps(theParameters), 200)\n\t\ttheResponse.headers['Content-Type'] = 'application/json'\n\t\treturn theResponse\n\telse:\n\t\treturn render_template('uploaded.html', **theParameters)\n\n\n@app.route('/upload')\ndef upload():\n\t# via: https://devcenter.heroku.com/articles/s3#file-uploads\n\t# via: http://aws.amazon.com/articles/1434?_encoding=UTF8&jiveRedirect=1\n\n# \tflash(request.method)\n# \tflash(request.path)\n# \tflash(request.args)\n# \tflash(uploaded)\n# \tflash(key)\n# \tflash(bucket)\n\n#\tapp.logger.debug('Request: %s', request)\n\n\tif request.args.get('uploaded', False):\n\t\tflash('Uploaded! 
%s' % request.args['key'])\n\n\ttheNow = datetime.datetime.utcnow()\n\t# TODO: remove sub-seconds\n\ttheTTL = datetime.timedelta(minutes = 5)\n\ttheExpiration = theNow + theTTL\n\t# TODO: hack alert!\n\ttheNow = theNow.isoformat() + 'Z'\n\ttheExpiration = theExpiration.isoformat() + 'Z'\n\n\tAWS_ACCESS_KEY_ID = app.config['AWS_ACCESS_KEY_ID']\n\tAWS_SECRET_ACCESS_KEY = app.config['AWS_SECRET_ACCESS_KEY']\n\ttheBucket = app.config['S3_BUCKET_NAME']\n\n\tthePath = 'uploads/%s' % uuid.uuid4().hex\n\ttheRedirect = url_for('uploaded', _external = True)\n\ttheACL = 'private'\n\tthePolicy = {\n\t\t'expiration': theExpiration,\n\t\t'conditions': [ \n\t\t\t{'bucket': theBucket}, \n\t\t\t['starts-with', '$key', '%s/' % thePath],\n\t\t\t{'acl': theACL},\n\t\t\t{'success_action_redirect': theRedirect},\n\t\t\t['starts-with', '$Content-Type', ''],\n\t\t\t['content-length-range', 0, 1048576]\n\t\t ]\n\t\t}\n\tthePolicy = json.dumps(thePolicy)\n\tthePolicy = base64.b64encode(thePolicy)\n\n\ttheSignature = base64.b64encode(hmac.new(AWS_SECRET_ACCESS_KEY, thePolicy, hashlib.sha1).digest())\n\n\ttheParameters = {\n\t\t'bucket': theBucket,\n\t\t'policy': thePolicy,\n\t\t'signature': theSignature,\n\t\t'redirect': theRedirect,\n\t\t'acl': theACL,\n\t\t'AWS_ACCESS_KEY_ID': AWS_ACCESS_KEY_ID,\n\t\t'key': '%s/${filename}' % thePath,\n\t\t'url': 'https://%s.s3.amazonaws.com/' % theBucket,\n\t\t'encoding': 'multipart/form-data',\n\t\t'expiration': theExpiration,\n\t\t'now': theNow,\n\t\t'ttl': theTTL.total_seconds(),\n\t\t}\n\n\tif request.headers.get('Accept') == 'application/json':\n\t\ttheResponse = make_response(json.dumps(theParameters), 200)\n\t\ttheResponse.headers['Content-Type'] = 'application/json'\n\t\treturn theResponse\n\telse:\n\t\treturn render_template('upload.html', **theParameters)\n\n################################################################################\n\nif __name__ == '__main__':\n\t# Bind to PORT if defined, otherwise default to 5000.\n\tport = int(os.environ.get('PORT', 5000))\n\tapp.run(host='0.0.0.0', port=port)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"563581874","text":"# 6. 
Implement two small scripts:\n# b) an infinite iterator that repeats the elements of some list defined in advance.\n# Hint: use the count() and cycle() functions of the itertools module.\n\n\nfrom sys import argv\nfrom itertools import cycle\n\nif len(argv) == 1:\n print(\"Pass the program a list of numbers to repeat\")\n exit(0)\n\nfor x in cycle(argv[1:]):\n print(x)\n","sub_path":"lesson4/task6.2.py","file_name":"task6.2.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"362120853","text":"import os\nimport json\nfrom Configuration import Configuration\nfrom Installer import Installer\n\n\nclass Kubernetes(Installer):\n def __init__(self, args):\n super(Kubernetes, self).__init__(args)\n self.config = Configuration()\n\n def set(self, n, v):\n self.config.add(n, v)\n\n def setup_postgres(self):\n if not self.is_master():\n self.set('storage.migration.enabled', False)\n self.required_option('database')\n args = self.args\n\n if self.args.database == 'postgres':\n self.required_option('postgres-host')\n self.required_option('postgres-port')\n self.required_option('postgres-username')\n self.required_option('postgres-password')\n self.required_option('postgres-database')\n self.set('storage.postgres.enabled', True)\n self.set('storage.postgres.host', args.postgres_host)\n self.set('storage.postgres.port', args.postgres_port)\n self.set('storage.postgres.db', args.postgres_database)\n self.set('storage.cloudsql.enabled', False)\n\n secret = {\n \"username\": args.postgres_username,\n \"password\": args.postgres_password\n }\n\n self.create_secret(\"infrabox-postgres\", self.args.general_system_namespace, secret)\n elif args.database == 'cloudsql':\n self.required_option('cloudsql-instance-connection-name')\n self.required_option('cloudsql-proxy-service-account-key-file')\n self.required_option('cloudsql-proxy-username')\n self.required_option('cloudsql-proxy-password')\n self.required_option('postgres-database')\n\n Installer.check_file_exists(args.cloudsql_proxy_service_account_key_file)\n\n self.set('storage.postgres.enabled', False)\n self.set('storage.postgres.host', \"localhost\")\n self.set('storage.postgres.port', 5432)\n self.set('storage.postgres.db', args.postgres_database)\n self.set('storage.cloudsql.instance_connection_name', args.cloudsql_instance_connection_name)\n self.set('storage.cloudsql.enabled', True)\n\n secret = {\n \"username\": args.cloudsql_proxy_username,\n \"password\": args.cloudsql_proxy_password\n }\n\n self.create_secret(\"infrabox-postgres\", self.args.general_system_namespace, secret)\n\n with open(args.cloudsql_proxy_service_account_key_file) as keyfile:\n secret = {\n \"credentials.json\": keyfile.read()\n }\n\n self.create_secret(\"infrabox-cloudsql-instance-credentials\", self.args.general_system_namespace, secret)\n\n else:\n raise Exception('unknown database type')\n\n def setup_storage(self):\n self.required_option('storage')\n args = self.args\n\n if args.storage == 's3':\n self.required_option('s3-access-key')\n self.required_option('s3-secret-key')\n self.required_option('s3-region')\n self.required_option('s3-endpoint')\n self.required_option('s3-port')\n self.required_option('s3-bucket')\n\n self.set('storage.gcs.enabled', False)\n self.set('storage.s3.enabled', True)\n self.set('storage.s3.region', args.s3_region)\n self.set('storage.s3.endpoint', args.s3_endpoint)\n self.set('storage.s3.bucket', args.s3_bucket)\n 
self.set('storage.s3.port', args.s3_port)\n self.set('storage.s3.secure', args.s3_secure == 'true')\n\n secret = {\n \"secretKey\": args.s3_secret_key,\n \"accessKey\": args.s3_access_key\n }\n\n self.create_secret(\"infrabox-s3-credentials\", self.args.general_system_namespace, secret)\n elif args.storage == 'gcs':\n self.required_option('gcs-service-account-key-file')\n self.required_option('gcs-bucket')\n\n Installer.check_file_exists(args.gcs_service_account_key_file)\n\n self.set('storage.s3.enabled', False)\n self.set('storage.gcs.enabled', True)\n self.set('storage.gcs.bucket', args.gcs_bucket)\n\n with open(args.gcs_service_account_key_file) as keyfile:\n secret = {\n \"gcs_service_account.json\": keyfile.read()\n }\n\n self.create_secret(\"infrabox-gcs\", self.args.general_system_namespace, secret)\n else:\n raise Exception(\"unknown storage\")\n\n def setup_admin_password(self):\n self.required_option('admin-password')\n self.required_option('admin-email')\n\n secret = {\n \"email\": self.args.admin_email,\n \"password\": self.args.admin_password\n }\n\n self.create_secret(\"infrabox-admin\", self.args.general_system_namespace, secret)\n\n def setup_docker_registry(self):\n self.set('docker_registry.nginx_tag', self.args.version)\n self.set('docker_registry.auth_tag', self.args.version)\n\n self.required_option('docker-registry')\n self.set('general.docker_registry', self.args.docker_registry)\n self.set('docker_registry.url', self.args.root_url)\n\n def setup_account(self):\n self.set('account.signup.enabled', self.args.account_signup_enabled)\n\n def setup_local_cache(self):\n self.set('local_cache.enabled', self.args.local_cache_enabled)\n\n if self.args.local_cache_enabled:\n self.required_option('local-cache-host-path')\n self.set('local_cache.host_path', self.args.local_cache_host_path)\n\n def setup_ldap(self):\n if not self.is_master():\n return\n\n if not self.args.ldap_enabled:\n return\n\n self.required_option('ldap-dn')\n self.required_option('ldap-password')\n self.required_option('ldap-base')\n self.required_option('ldap-url')\n\n secret = {\n \"dn\": self.args.ldap_dn,\n \"password\": self.args.ldap_password\n }\n\n self.create_secret(\"infrabox-ldap\", self.args.general_system_namespace, secret)\n\n self.set('account.ldap.enabled', True)\n self.set('account.ldap.base', self.args.ldap_base)\n self.set('account.ldap.url', self.args.ldap_url)\n self.set('account.signup.enabled', False)\n\n def setup_gerrit(self):\n if not self.is_master():\n return\n\n if not self.args.gerrit_enabled:\n return\n\n self.required_option('gerrit-hostname')\n self.required_option('gerrit-port')\n self.required_option('gerrit-username')\n self.required_option('gerrit-private-key')\n\n self.set('gerrit.enabled', True)\n self.set('gerrit.hostname', self.args.gerrit_hostname)\n self.set('gerrit.username', self.args.gerrit_username)\n self.set('gerrit.review.enabled', self.args.gerrit_review_enabled)\n self.set('gerrit.review.tag', self.args.version)\n self.set('gerrit.trigger.tag', self.args.version)\n self.set('gerrit.api.tag', self.args.version)\n\n Installer.check_file_exists(self.args.gerrit_private_key)\n\n secret = {\n \"id_rsa\": open(self.args.gerrit_private_key).read()\n }\n\n self.create_secret(\"infrabox-gerrit-ssh\", self.args.general_system_namespace, secret)\n self.create_secret(\"infrabox-gerrit-ssh\", self.args.general_worker_namespace, secret)\n\n def setup_github(self):\n if not self.is_master():\n return\n\n if not self.args.github_enabled:\n return\n\n 
self.required_option('github-client-id')\n self.required_option('github-client-secret')\n self.required_option('github-webhook-secret')\n self.required_option('github-api-url')\n self.required_option('github-login-url')\n\n self.set('github.enabled', True)\n self.set('github.login.enabled', self.args.github_login_enabled)\n self.set('github.login.url', self.args.github_login_url)\n self.set('github.api_url', self.args.github_api_url)\n self.set('github.trigger.tag', self.args.version)\n self.set('github.api.tag', self.args.version)\n self.set('github.review.tag', self.args.version)\n self.set('github.login.allowed_organizations', self.args.github_login_allowed_organizations)\n\n secret = {\n \"client_id\": self.args.github_client_id,\n \"client_secret\": self.args.github_client_secret,\n \"webhook_secret\": self.args.github_webhook_secret\n }\n\n self.create_secret(\"infrabox-github\", self.args.general_system_namespace, secret)\n\n def setup_dashboard(self):\n if not self.is_master():\n self.set('dashboard.enabled', False)\n else:\n self.set('dashboard.api.tag', self.args.version)\n self.set('dashboard.url', self.args.root_url)\n\n def setup_api(self):\n self.set('api.url', self.args.root_url + '/api/cli')\n self.set('api.tag', self.args.version)\n\n def setup_static(self):\n if not self.is_master():\n self.set('static.enabled', False)\n else:\n self.set('static.tag', self.args.version)\n\n def setup_general(self):\n self.required_option('general-rsa-private-key')\n self.required_option('general-rsa-public-key')\n\n self.set('general.dont_check_certificates', self.args.general_dont_check_certificates)\n self.set('general.worker_namespace', self.args.general_worker_namespace)\n self.set('general.system_namespace', self.args.general_system_namespace)\n self.set('general.rbac.enabled', not self.args.general_rbac_disabled)\n self.set('general.report_issue_url', self.args.general_report_issue_url)\n self.set('root_url', self.args.root_url)\n\n Installer.check_file_exists(self.args.general_rsa_private_key)\n Installer.check_file_exists(self.args.general_rsa_public_key)\n\n secret = {\n \"id_rsa\": open(self.args.general_rsa_private_key).read(),\n \"id_rsa.pub\": open(self.args.general_rsa_public_key).read()\n }\n\n self.create_secret(\"infrabox-rsa\", self.args.general_system_namespace, secret)\n\n def setup_job(self):\n self.set('job.mount_docker_socket', self.args.job_mount_docker_socket)\n self.set('job.use_host_docker_daemon', self.args.job_use_host_docker_daemon)\n self.set('job.security_context.capabilities.enabled',\n self.args.job_security_context_capabilities_enabled)\n\n self.set('job.api.url', self.args.root_url + '/api/job')\n self.set('job.api.tag', self.args.version)\n\n def setup_db(self):\n self.set('db.tag', self.args.version)\n\n def setup_scheduler(self):\n self.set('scheduler.tag', self.args.version)\n self.set('scheduler.enabled', not self.args.scheduler_disabled)\n\n def setup_cluster(self):\n self.set('cluster.name', self.args.cluster_name)\n self.set('cluster.labels', self.args.cluster_labels)\n\n def setup_ingress(self):\n host = self.args.root_url.replace('http://', '')\n host = host.replace('https://', '')\n\n if not self.args.ingress_tls_host:\n self.args.ingress_tls_host = host.split(':')[0]\n\n self.set('ingress.tls.force_redirect', not self.args.ingress_tls_dont_force_redirect)\n self.set('ingress.tls.enabled', not self.args.ingress_tls_disabled)\n self.set('ingress.tls.host', self.args.ingress_tls_host)\n\n def main(self):\n self.required_option('root-url')\n\n 
while True:\n if self.args.root_url.endswith('/'):\n self.args.root_url = self.args.root_url[:-1]\n else:\n break\n\n # Copy helm chart\n Installer.copy_files(self.args, 'infrabox')\n\n # Load values\n values_path = os.path.join(self.args.o, 'infrabox', 'values.yaml')\n self.config.load(values_path)\n\n self.setup_general()\n self.setup_admin_password()\n self.setup_storage()\n self.setup_postgres()\n self.setup_docker_registry()\n self.setup_account()\n self.setup_job()\n self.setup_db()\n self.setup_scheduler()\n self.setup_cluster()\n self.setup_gerrit()\n self.setup_github()\n self.setup_dashboard()\n self.setup_api()\n self.setup_static()\n self.setup_ldap()\n self.setup_local_cache()\n self.setup_ingress()\n\n daemon_config = {\n 'disable-legacy-registry': True\n }\n\n if self.args.general_dont_check_certificates:\n registry_name = self.args.root_url.replace('http://', '')\n registry_name = registry_name.replace('https://', '')\n daemon_config['insecure-registries'] = [registry_name]\n daemon_config_path = os.path.join(self.args.o, 'infrabox', 'config', 'docker', 'daemon.json')\n json.dump(daemon_config, open(daemon_config_path, 'w'))\n\n self.config.dump(values_path)\n","sub_path":"deploy/deployModules/Kubernetes.py","file_name":"Kubernetes.py","file_ext":"py","file_size_in_byte":13064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"119455467","text":"from copy import deepcopy\nfrom collections import defaultdict\nimport pytest\nfrom psycopg2.extras import RealDictCursor\n\nfrom ..utils.post import TestGenPost\nfrom ..utils.patch import TestGenPatch\n\n\npost_gen = TestGenPost()\nDATA = post_gen.generate_valid_test(length=25)\npatch_gen = TestGenPatch(src_data=DATA)\n\n\n@pytest.fixture\ndef import_id(client):\n rv = client.post('/imports', json=DATA)\n print(rv.json)\n assert rv.status_code == 201\n return rv.json['data']['import_id']\n\n\n@pytest.mark.parametrize('data, exp_resp', [\n (patch_gen.generate_valid_test(), 200),\n (patch_gen.generate_wrong_relations(mode='float'), 400),\n (patch_gen.generate_wrong_dates(mode='future'), 400),\n (patch_gen.generate_wrong_dates(mode='not_exists'), 400),\n (patch_gen.generate_valid_json_messed(), 200),\n (patch_gen.generate_invalid_data_type_test(field='town', mode='empty'), 400),\n (patch_gen.generate_invalid_data_type_test(field='town', mode='null'), 400),\n (patch_gen.generate_invalid_data_type_test(field='town', mode='wrong_type'), 400),\n (patch_gen.generate_invalid_data_type_test(field='street', mode='empty'), 400),\n (patch_gen.generate_invalid_data_type_test(field='street', mode='null'), 400),\n (patch_gen.generate_invalid_data_type_test(field='street', mode='wrong_type'), 400),\n (patch_gen.generate_invalid_data_type_test(field='building', mode='empty'), 400),\n (patch_gen.generate_invalid_data_type_test(field='building', mode='null'), 400),\n (patch_gen.generate_invalid_data_type_test(field='apartment', mode='null'), 400),\n (patch_gen.generate_invalid_data_type_test(field='apartment', mode='wrong_type'), 400),\n (patch_gen.generate_invalid_data_type_test(field='birth_date', mode='empty'), 400),\n (patch_gen.generate_invalid_data_type_test(field='building', mode='wrong_type'), 400),\n (patch_gen.generate_invalid_data_type_test(field='birth_date', mode='null'), 400),\n (patch_gen.generate_invalid_data_type_test(field='birth_date', mode='wrong_type'), 400),\n (patch_gen.generate_invalid_data_type_test(field='name', mode='empty'), 400),\n 
(patch_gen.generate_invalid_data_type_test(field='name', mode='null'), 400),\n (patch_gen.generate_invalid_data_type_test(field='name', mode='wrong_type'), 400),\n (patch_gen.generate_invalid_data_type_test(field='gender', mode='empty'), 400),\n (patch_gen.generate_invalid_data_type_test(field='gender', mode='null'), 400),\n (patch_gen.generate_invalid_data_type_test(field='gender', mode='wrong_type'), 400),\n (patch_gen.generate_invalid_data_type_test(field='relatives', mode='empty'), 200),\n (patch_gen.generate_invalid_data_type_test(field='relatives', mode='null'), 400),\n (patch_gen.generate_invalid_data_type_test(field='relatives', mode='wrong_type'), 400),\n (patch_gen.generate_empty_data(), 400),\n (patch_gen.generate_broken_json_extra_field(), 400),\n (patch_gen.generate_broken_json_missing_fields(field='relatives'), 200),\n (patch_gen.generate_longer_test(length=2), 400),\n (patch_gen.generate_unknown_relative(), 400)\n])\ndef test_patch_citizen(client, import_id, data, exp_resp):\n print(data)\n for citizen in DATA['citizens']:\n citizen_id = citizen['citizen_id']\n rv = client.patch(f'/imports/{import_id}/citizens/{citizen_id}', json=data)\n assert rv.status_code == exp_resp\n\n\ndef test_import_id_not_exist(client):\n rv = client.patch(f'/imports/1/citizens/1', json={'town': 'test'})\n assert rv.status_code == 400\n\n\ndef test_citizen_id_not_exist(client, import_id):\n citizen_id = max([citizen['citizen_id'] for citizen in DATA['citizens']]) + 1\n rv = client.patch(f'/imports/{import_id}/citizens/{citizen_id}', json={'town': 'test'})\n assert rv.status_code == 400\n\n\ndef test_empty_payload(client, import_id):\n citizen_id = max([citizen['citizen_id'] for citizen in DATA['citizens']]) + 1\n rv = client.patch(f'/imports/{import_id}/citizens/{citizen_id}')\n assert rv.status_code == 400\n\n\ndef get_relations(conn, import_id, schema='imports'):\n query = f\"\"\"\n select * from {schema}.relation\n where \n import_id = %s\n and is_active is true\n \"\"\"\n with conn.cursor(cursor_factory=RealDictCursor) as cur:\n cur.execute(query, (import_id,))\n res = cur.fetchall()\n\n if not res:\n return\n\n relations = defaultdict(list)\n\n for relation in res:\n relations[relation['citizen_id']].append(relation['relative'])\n\n return relations\n\n\ndef test_leave_relation(client, conn, import_id):\n for citizen in DATA['citizens']:\n citizen_id = citizen['citizen_id']\n relations = get_relations(conn, import_id)\n if citizen_id not in relations:\n continue\n\n rels = relations[citizen_id]\n leave_relation = rels.pop()\n rv = client.patch(f'/imports/{import_id}/citizens/{citizen_id}', json={'relatives': rels})\n\n assert rv.status_code == 200\n\n relations = get_relations(conn, import_id)\n\n if not relations:\n break\n\n rels = relations[citizen_id]\n assert leave_relation not in rels\n rels = relations[leave_relation]\n assert citizen_id not in rels\n\n\ndef test_join_relation(client, conn, import_id):\n for citizen in DATA['citizens']:\n citizen_id = citizen['citizen_id']\n relations = get_relations(conn, import_id)\n print(relations)\n\n join_relation = None\n for cid, rels in relations.items():\n if citizen_id not in rels and citizen_id != cid:\n join_relation = cid\n break\n\n if not join_relation:\n continue\n\n rels = relations[citizen_id]\n rels.append(join_relation)\n\n rv = client.patch(f'/imports/{import_id}/citizens/{citizen_id}', json={'relatives': rels})\n\n assert rv.status_code == 200\n\n relations = get_relations(conn, import_id)\n print(relations)\n\n rels = 
relations[citizen_id]\n assert join_relation in rels\n rels = relations[join_relation]\n assert citizen_id in rels\n\n\ndef test_join_self_relation(client, conn, import_id):\n citizen = DATA['citizens'][0]\n citizen_id = citizen['citizen_id']\n rels = citizen['relatives']\n rels.append(citizen_id)\n rv = client.patch(f'/imports/{import_id}/citizens/{citizen_id}', json={'relatives': rels})\n\n assert rv.status_code == 200\n\n relations = get_relations(conn, import_id)\n assert citizen_id in relations[citizen_id]\n\n\ndef test_join_and_leave(client, conn, import_id):\n citizen = None\n for ctz in DATA['citizens']:\n if ctz['relatives']:\n citizen = deepcopy(ctz)\n break\n\n if not citizen:\n raise Exception('Unexpected Error. Could not find citizen with relatives')\n\n citizen_id = citizen['citizen_id']\n relatives = citizen['relatives']\n join_relation = None\n for ctz in DATA['citizens']:\n cid = ctz['citizen_id']\n if cid not in relatives and citizen_id != cid:\n join_relation = cid\n break\n\n if not join_relation:\n raise Exception('Unexpected Error. Could not find citizen to join')\n\n leave_relation = relatives.pop()\n relatives.append(join_relation)\n rv = client.patch(f'/imports/{import_id}/citizens/{citizen_id}', json={'relatives': relatives})\n\n assert rv.status_code == 200\n\n relations = get_relations(conn, import_id)\n\n assert leave_relation not in relations[citizen_id]\n assert citizen_id not in relations[leave_relation]\n assert join_relation in relations[citizen_id]\n assert citizen_id in relations[join_relation]\n\n\ndef test_empty_relative(client, import_id):\n citizen = None\n for ctz in DATA['citizens']:\n if ctz['relatives']:\n citizen = deepcopy(ctz)\n break\n\n if not citizen:\n raise Exception('Unexpected Error. Could not find citizen with relatives')\n\n citizen_id = citizen['citizen_id']\n\n rv = client.patch(f'/imports/{import_id}/citizens/{citizen_id}', json={'relatives': []})\n\n assert rv.status_code == 200\n\n assert rv.json['data']['relatives'] == []\n\n\n@pytest.mark.parametrize('field, val', [\n ('town', 'Test'),\n ('street', 'test'),\n ('building', 'test'),\n ('apartment', 123456789),\n ('name', 'test'),\n ('birth_date', '26.12.2000')\n])\ndef test_patch_other_fields(client, import_id, field, val):\n citizen_id = DATA['citizens'][0]['citizen_id']\n rv = client.patch(f'/imports/{import_id}/citizens/{citizen_id}', json={field: val})\n assert rv.status_code == 200\n assert rv.json['data'][field] == val\n","sub_path":"tests/imports/test_citizens.py","file_name":"test_citizens.py","file_ext":"py","file_size_in_byte":8626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"152150183","text":"#Sum of Even and Odd\r\n\r\n'''\r\nWrite a program to calculate the sum of even and odd numbers including its count.\r\n\r\nInput:\r\n1.Total number of Inputs\r\n2. Input elements\r\n\r\nOutput:\r\n1. Odd numbers count\r\n2. Even numbers count\r\n3. Sum of even numbers\r\n4. 
Sum of odd numbers\r\n'''\r\n\r\nn=int(input())\r\nl=[]\r\nec,oc,es,os=0,0,0,0\r\nfor i in range(n):\r\n l.insert(i,int(input()))\r\n \r\nfor i in range(n):\r\n if (l[i]%2)==0:\r\n ec+=1\r\n es+=l[i]\r\n else:\r\n oc+=1\r\n os+=l[i]\r\n \r\nprint(oc)\r\nprint(ec)\r\nprint(es)\r\nprint(os)\r\n","sub_path":"Python/Sum of Even and Odd.py","file_name":"Sum of Even and Odd.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"58117767","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('Admin/', admin.site.urls),\n path('',views.index,name = 'index'),\n path('Home',views.Home,name='Home'),\n path('Software',views.Software,name='Software'),\n path('Hardware',views.Hardware,name='Hardware'),\n path('Antivirus',views.Antivirus,name='Antivirus'),\n path('About',views.About,name='About'),\n]\n","sub_path":"textutils/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"623836644","text":"from scipy.stats import gamma, beta, norm, truncnorm\nfrom scipy.integrate import quad\nfrom scipy.special import gammainc\nfrom collections import ChainMap\nfrom enum import Enum\nfrom bitarray.util import count_and\nfrom bitarray import bitarray as bitmap\nfrom numpy import linspace\n\nimport matplotlib.pyplot as plt\nimport json\nimport glob\nimport time\nimport math\n\nSET = lambda x, para : para\nMAX = lambda x, para : max(x, para)\nMIN = lambda x, para : min(x, para)\nMULT = lambda x, para : x * para\nDIV = lambda x, para : x / para\nADD = lambda x, para : x + para\nSUBT = lambda x, para : x - para\nPOW = lambda x, para : math.pow(x, para)\nROOT = lambda x, para : math.pow(x, 1 / para)\nLOG = lambda x, para : math.log(x, para)\n\nclass Query(Enum):\n PDF = 1\n CDF = 2\n PPF = 3\n MEAN = 4\n PMEAN = 5\n\nclass Curve:\n def __init__(self, distrib, paras):\n self.distrib = distrib\n self.paras = paras\n self.update = False\n\n self.children = []\n\n for para in self.paras:\n add_helper(para.children, self)\n\n def do_update(self):\n if self.update:\n self.update = False\n \n for para in self.paras:\n para.do_update()\n\n return self\n\n def do_query(self, query, x=None):\n if query == Query.PDF:\n return run_helper(self.do_update().distrib.pdf, self.paras, x)\n elif query == Query.CDF:\n return run_helper(self.do_update().distrib.cdf, self.paras, x)\n elif query == Query.PPF:\n return run_helper(self.do_update().distrib.ppf, self.paras, x)\n elif query == Query.MEAN:\n return run_helper(self.do_update().distrib.mean, self.paras)\n elif query == Query.PMEAN:\n if type(self.distrib) == type(beta):\n return self.paras[2].value / (self.paras[2].value + self.paras[3].value) * beta.cdf(x, self.paras[2].value + 1, self.paras[3].value, self.paras[0].value, self.paras[1].value) / self.do_query(Query.CDF, x)\n elif type(self.distrib) == type(gamma):\n return self.paras[2].value * self.paras[1].value * gammainc(self.paras[2].value + 1, x / self.paras[1].value) / gammainc(self.paras[2].value, x / self.paras[1].value)\n elif type(self.distrib) == type(norm):\n # truncnorm expects standardized bounds, so rescale [0, x] by loc and scale, and pass parameter values rather than the Value wrappers\n return truncnorm.mean((0 - self.paras[0].value) / self.paras[1].value, (x - self.paras[0].value) / self.paras[1].value, self.paras[0].value, self.paras[1].value)\n else:\n return quad(lambda x, curve : x * curve.do_query(Query.PDF, x), 0, x, args=(self,))[0] / self.do_query(Query.CDF, x)\n\n return None\n\nclass CurveSum:\n def __init__(self, curves, functions=None, paras=None, 
functions_mean=None, paras_mean=None):\n self.curves = curves\n self.functions = functions\n self.paras = paras\n self.functions_mean = functions_mean\n self.paras_mean = paras_mean\n self.mean = None\n self.update = True\n\n self.children = []\n\n for curve in self.curves:\n add_helper(curve.children, self)\n\n if self.paras:\n for paras in self.paras:\n for para in paras:\n add_helper(para.children, self)\n if self.paras_mean:\n for paras in self.paras_mean:\n for para in paras:\n add_helper(para.children, self)\n\n self.do_update()\n\n def do_update(self):\n if self.update:\n self.update = False\n\n self.mean = 0\n\n for i, curve in enumerate(self.curves):\n functions = []\n paras = []\n \n if self.paras_mean:\n functions = self.functions_mean[i]\n paras = self.paras_mean[i]\n\n mean = curve.do_query(Query.MEAN)\n\n for ii in range(len(paras)):\n mean = functions[ii](mean, paras[ii].do_update())\n\n self.mean += mean\n\n return self\n\n def do_query(self, query, x=None):\n if query == Query.PDF or query == Query.CDF or query == Query.PPF or query == Query.PMEAN:\n result = 0\n\n for i, curve in enumerate(self.curves):\n functions = []\n paras = []\n\n if self.paras:\n functions = self.functions[i]\n paras = self.paras[i]\n\n t = curve.do_query(query, x)\n\n for ii in range(len(paras)):\n t = functions[ii](t, paras[ii].do_update())\n\n result += t\n\n return result\n elif query == Query.MEAN:\n return self.do_update().mean\n\n return None\n \n\nclass Value:\n def __init__(self, base, functions=None, paras=None):\n self.base = base\n self.functions = functions\n self.paras = paras\n self.value = None\n self.update = True\n\n self.children = []\n\n if self.paras:\n for para in self.paras:\n add_helper(para.children, self)\n\n self.do_update()\n\n def do_update(self):\n if self.update:\n self.update = False\n\n self.value = self.base\n \n if self.functions:\n for i in range(len(self.functions)):\n self.value = self.functions[i](self.value, self.paras[i].do_update())\n\n return self.value\n\n def set_base(self, base):\n if base != self.base:\n self.update = True\n \n self.base = base\n\n def change_base(self, function, para):\n self.set_base(function(self.base, para))\n\nclass Instance(Value):\n def __init__(self, base, query, curve, functions=None, paras=None):\n self.query = query\n self.curve = curve\n \n super().__init__(base, functions, paras)\n\n add_helper(self.curve.children, self)\n\n def do_update(self):\n if self.update:\n self.value = self.curve.do_query(self.query, super().do_update())\n\n return self.value\n\nclass World:\n def __init__(self):\n self.objs = dict()\n self.defines = dict()\n \n self.systems = dict()\n self.systems_id = dict()\n self.values_id = dict()\n\n self.systems_bitmap = bitmap()\n self.values_bitmap = bitmap()\n\n def add_item(self, name, item):\n self.objs[name] = item\n\n def get_item(self, name):\n return self.objs.get(name)\n\n def set_defines(self, item):\n self.defines = item\n\n def do_update(self):\n lst = [obj for obj in self.objs.values() if obj.update]\n\n while lst:\n for child in lst.pop().children:\n if not child.update:\n child.update = True\n\n lst.append(child)\n\n for obj in self.objs.values():\n obj.do_update()\n\n def add_system(self, name, item, parents=None):\n if not name in self.systems:\n self.systems_id[name] = len(self.systems_bitmap)\n self.systems_bitmap.append(False)\n\n for system in self.systems.values():\n system.parents.append(False)\n\n item.parents = self.systems_bitmap.copy()\n item.parents.setall(False)\n\n for value 
in item.writes:\n if not value in self.values_id:\n self.values_id[value] = len(self.values_bitmap)\n self.values_bitmap.append(False)\n\n for system in self.systems.values():\n system.values.append(False)\n\n item.values = self.values_bitmap.copy()\n item.values.setall(False)\n\n if parents:\n for parent in parents:\n item.parents[self.systems_id[parent]] = True\n\n for value in item.writes:\n item.values[self.values_id[value]] = True\n\n self.systems[name] = item\n\n def do_run(self):\n systems = dict(self.systems)\n self.systems_bitmap.setall(True)\n\n while True:\n self.values_bitmap.setall(False)\n \n lst = []\n \n for name, system in systems.items():\n if not count_and(self.systems_bitmap, system.parents) and not count_and(self.values_bitmap, system.values):\n self.values_bitmap = self.values_bitmap | system.values\n\n lst.append((name, system))\n\n if lst:\n for name, system in lst:\n systems.pop(name)\n self.systems_bitmap[self.systems_id[name]] = False\n system.do_run()\n else:\n break\n\nclass WorldLoader:\n def __init__(self, root):\n self.objs = dict()\n self.defines = dict()\n \n files = []\n templates = {}\n defines = []\n \n for path in glob.glob(f'{root}\\\\World\\\\*.json'):\n files.append(json.load(open(path)))\n for path in glob.glob(f'{root}\\\\Template\\\\*.txt'):\n templates[path.split('\\\\')[-1].split('.')[0].strip()] = open(path).read()\n for path in glob.glob(f'{root}\\\\Defines\\\\*.json'):\n defines.append(json.load(open(path)))\n\n self.defines = parse_list(defines)\n\n for file in files:\n for dct in file:\n name = dct.get('name')\n tp = dct.get('type')\n\n if name and tp:\n if tp == 'Template':\n self.convert_template(templates, templates[name], dct)\n else:\n obj = dict(dct)\n\n obj['parents'] = list()\n obj['children'] = list()\n\n self.objs[name] = obj\n else:\n file.remove(dct)\n\n for name, obj in self.objs.items():\n parents = obj['parents']\n children = obj['children']\n tp = obj['type']\n\n if tp == 'CurveSum':\n for curve in obj['curves']:\n add_helper(parents, curve)\n add_helper(self.objs[curve]['children'], name)\n for paras in obj['paras']:\n for para in paras:\n add_helper(parents, para)\n add_helper(self.objs[para]['children'], name)\n for paras in obj['paras_mean']:\n for para in paras:\n add_helper(parents, para)\n add_helper(self.objs[para]['children'], name)\n else:\n for para in obj['paras']:\n add_helper(parents, para)\n add_helper(self.objs[para]['children'], name)\n\n if tp == 'Instance':\n add_helper(parents, obj['curve'])\n add_helper(self.objs[obj['curve']]['children'], name)\n\n def convert_template(self, templates, template, dct):\n for arg, val in dct.items():\n if f'%{arg}%' in template:\n template = template.replace(f'%{arg}%', str(val))\n\n template = apply_conditional('!', '@', template, dct)\n template = apply_conditional('#', '$', template, dct)\n\n parsed = json.loads(template)\n\n for dct in parsed:\n name = dct.get('name')\n tp = dct.get('type')\n\n if name and tp:\n if tp == 'Template':\n self.convert_template(templates, templates[name], dct)\n else:\n obj = dict(dct)\n\n obj['parents'] = list()\n obj['children'] = list()\n\n self.objs[name] = obj\n\n def gen(self):\n world = World()\n world.set_defines(self.defines)\n\n lst = [pair for pair in self.objs.items() if not pair[1]['parents']]\n\n while lst:\n for (name, obj) in lst:\n self.objs.pop(name)\n \n typ = obj['type']\n\n if typ == 'CurveSum':\n world.add_item(\n name,\n CurveSum(\n [world.get_item(curve) for curve in obj['curves']],\n [[str_2_function(function) for 
function in functions] for functions in obj['functions']],\n [[world.get_item(para) for para in paras] for paras in obj['paras']],\n [[str_2_function(function) for function in functions] for functions in obj['functions_mean']],\n [[world.get_item(para) for para in paras] for paras in obj['paras_mean']]\n )\n )\n elif typ == 'Curve':\n distrib = obj['distrib']\n \n if distrib == 'gamma':\n distrib = gamma\n elif distrib == 'beta':\n distrib = beta\n elif distrib == 'normal':\n distrib = normal\n\n world.add_item(\n name,\n Curve(\n distrib,\n [world.get_item(para) for para in obj['paras']]\n )\n )\n elif typ == 'Value':\n world.add_item(\n name,\n Value(\n float(obj['base']),\n [str_2_function(function) for function in obj['functions']],\n [world.get_item(para) for para in obj['paras']]\n )\n )\n elif typ == 'Instance':\n query = obj['query']\n\n if query == 'PDF':\n query = Query.PDF\n elif query == 'CDF':\n query = Query.CDF\n elif query == 'PPF':\n query = Query.PPF\n elif query == 'MEAN':\n query = Query.MEAN\n elif query == 'PMEAN':\n query = Query.PMEAN\n\n world.add_item(\n name,\n Instance(\n float(obj['base']),\n query,\n world.get_item(obj['curve']),\n [str_2_function(function) for function in obj['functions']],\n [world.get_item(para) for para in obj['paras']]\n )\n )\n\n for child in obj['children']:\n self.objs[child]['parents'].remove(name)\n\n lst = [pair for pair in self.objs.items() if not pair[1]['parents']]\n \n return world\n\nclass System:\n def __init__(self, writes=None, reads=None):\n self.writes = parse_list(writes)\n self.reads = parse_list(reads)\n self.parents = bitmap()\n self.values = bitmap()\n\n def do_run(self):\n pass\n\nclass SystemTrade(System):\n def __init__(self, world):\n self.pops = world.defines['Pop']\n self.goods = world.defines['Good']\n \n writes = dict()\n reads = dict()\n\n for pop in self.pops:\n for good in self.goods:\n writes[f'{pop} {good} Bid Matched'] = world.get_item(f'{pop} {good} Bid Matched')\n writes[f'{pop} {good} Offer Matched'] = world.get_item(f'{pop} {good} Offer Matched')\n writes[f'{pop} {good} Trade Balance'] = world.get_item(f'{pop} {good} Trade Balance')\n writes[f'{pop} {good} Price'] = world.get_item(f'{pop} {good} Price')\n \n reads[f'{pop} {good} Bid'] = world.get_item(f'{pop} {good} Bid')\n reads[f'{pop} {good} Offer'] = world.get_item(f'{pop} {good} Offer')\n\n super().__init__(writes, reads)\n\n world.add_system(\"Trade System\", self)\n\n def do_run(self):\n def split_sections(price, amount, pop):\n return [[price * 0.75, amount * 0.25, 0, pop], [price, amount * 0.5, 0, pop], [price * 1.25, amount * 0.25, 0, pop]]\n \n for pop in self.pops:\n for good in self.goods:\n self.writes[f'{pop} {good} Bid Matched'].set_base(0)\n self.writes[f'{pop} {good} Offer Matched'].set_base(0)\n self.writes[f'{pop} {good} Trade Balance'].set_base(0)\n\n for good in self.goods:\n bids = []\n offers = []\n\n for pop in self.pops:\n price = self.writes[f'{pop} {good} Price'].value\n bid = self.reads[f'{pop} {good} Bid'].value\n offer = self.reads[f'{pop} {good} Offer'].value\n\n if bid:\n bids.extend(split_sections(price, bid, pop))\n elif offer:\n offers.extend(split_sections(price, offer, pop))\n\n bids.sort(key=lambda item: item[0], reverse=True)\n offers.sort(key=lambda item: item[0])\n\n scores = [[max(bid[0] - offer[0], 0) for offer in offers] for bid in bids]\n\n while True:\n bids_matching = [[0 for offer in offers] for bid in bids]\n\n for i, bid in enumerate(bids):\n if bid[1] > bid[2]:\n sm = 0\n \n for ii, offer in 
enumerate(offers):\n bids_matching[i][ii] = scores[i][ii] * (offer[1] - offer[2])\n sm += bids_matching[i][ii]\n\n if sm > 0:\n for ii, offer in enumerate(offers):\n bids_matching[i][ii] /= sm\n bids_matching[i][ii] *= bid[1] - bid[2]\n\n if bids_matching[i][ii] > offer[1] - offer[2]:\n bids_matching[i][ii] = offer[1] - offer[2]\n \n trade_volume = 0\n \n for i, offer in enumerate(offers):\n seller_balance = self.writes[f'{offer[3]} {good} Trade Balance']\n \n total_bid = sum([bids_matching[ii][i] for ii, _ in enumerate(bids) if scores[ii][i]])\n total_offer = offer[1] - offer[2]\n\n if total_bid <= total_offer:\n offer[2] += total_bid\n trade_volume += total_bid\n\n for ii, bid in enumerate(bids):\n bidder_balance = self.writes[f'{bid[3]} {good} Trade Balance']\n \n bid[2] += bids_matching[ii][i]\n\n price = (offer[0] + bid[0]) / 2\n\n seller_balance.change_base(ADD, price * bids_matching[ii][i])\n bidder_balance.change_base(SUBT, price * bids_matching[ii][i])\n else:\n offer[2] += total_offer\n trade_volume += total_offer\n worst_bid = min([scores[ii][i] for ii, _ in enumerate(bids) if scores[ii][i] and bids_matching[ii][i]])\n foo = 1 - total_offer / total_bid\n sm = 0\n\n for ii, bid in enumerate(bids):\n if scores[ii][i]:\n bids_matching[ii][i] *= 1 - foo * worst_bid / scores[ii][i]\n sm += bids_matching[ii][i]\n \n for ii, bid in enumerate(bids):\n if scores[ii][i]:\n bidder_balance = self.writes[f'{bid[3]} {good} Trade Balance']\n \n bids_matching[ii][i] *= total_offer / sm\n bid[2] += bids_matching[ii][i]\n\n price = (offer[0] + bid[0]) / 2\n\n seller_balance.change_base(ADD, price * bids_matching[ii][i])\n bidder_balance.change_base(SUBT, price * bids_matching[ii][i])\n \n if trade_volume == 0:\n break\n\n for bid in bids:\n self.writes[f'{bid[3]} {good} Bid Matched'].change_base(ADD, bid[2])\n self.writes[f'{bid[3]} {good} Price'].change_base(MULT, 1 + (0.5 - bid[2] / bid[1]) / 50)\n for offer in offers:\n self.writes[f'{offer[3]} {good} Offer Matched'].change_base(ADD, offer[2])\n self.writes[f'{offer[3]} {good} Price'].change_base(MULT, 1 + (offer[2] / offer[1] - 0.5) / 50)\n \n \ndef add_helper(lst, item):\n if not item in lst:\n lst.append(item)\n\ndef run_helper(funct, paras, x=None):\n if len(paras) == 4:\n if x:\n return funct(x, paras[2].value, paras[3].value, paras[0].value, paras[1].value)\n else:\n return funct(paras[2].value, paras[3].value, paras[0].value, paras[1].value)\n if len(paras) == 3:\n if x:\n return funct(x, paras[2].value, paras[0].value, paras[1].value)\n else:\n return funct(paras[2].value, paras[0].value, paras[1].value)\n elif len(paras) == 2:\n if x:\n return funct(x, paras[0].value, paras[1].value)\n else:\n return funct(paras[0].value, paras[1].value)\n\n return None\n\ndef str_2_function(string):\n if string == 'SET':\n return SET\n elif string == 'MAX':\n return MAX\n elif string == 'MIN':\n return MIN\n elif string == 'MULT':\n return MULT\n elif string == 'DIV':\n return DIV\n elif string == 'ADD':\n return ADD\n elif string == 'SUBT':\n return SUBT\n elif string == 'POW':\n return POW\n elif string == 'ROOT':\n return ROOT\n elif string == 'LOG':\n return LOG\n\n return None\n\ndef parse_list(item):\n if type(item) == type(dict()):\n return item\n elif type(item) == type(list()):\n return dict(ChainMap(*item))\n else:\n return dict()\n \ndef apply_conditional(k0, k1, template, dct):\n i = 0\n \n while True:\n i = template.find(k0, i)\n\n if i + 1:\n ii = template.find(k0, i + 1)\n\n block = template[i:ii + 1]\n \n check = block[block.find(k1) 
+ 1:]\n check = check[:check.find(k1)]\n\n if check in dct:\n block = block.replace(f'{k1}{check}{k1}', '').strip(k0 + k1)\n else:\n block = ''\n\n template = template[:i] + block + template[ii + 1:]\n else:\n break\n\n return template\n \nif __name__ == '__main__':\n world = WorldLoader('C:\\\\Users\\\\wogud\\\\Desktop\\\\Prototype').gen()\n SystemTrade(world)\n world.do_run()\n world.do_update()\n\n print(world.get_item('Peasants Food Trade Balance').value)\n print(world.get_item('Peasants Timber Trade Balance').value)\n print(world.get_item('Peasants Fiber Trade Balance').value)\n print(world.get_item('Peasants Tools Trade Balance').value)\n print('--------------')\n print(world.get_item('Peasants Food Offer Matched').value)\n print(world.get_item('Peasants Timber Offer Matched').value)\n print(world.get_item('Peasants Fiber Offer Matched').value)\n print(world.get_item('Peasants Tools Bid Matched').value)\n print('--------------')\n print(world.get_item('Peasants Food Offer').value)\n print(world.get_item('Peasants Timber Offer').value)\n print(world.get_item('Peasants Fiber Offer').value)\n print(world.get_item('Peasants Tools Bid').value)\n print('--------------')\n print(world.get_item('Craftsmen Food Bid').value)\n print(world.get_item('Craftsmen Timber Bid').value)\n print(world.get_item('Craftsmen Fiber Bid').value)\n print(world.get_item('Craftsmen Tools Offer').value)\n print('--------------')\n print(world.get_item('Peasants Food Price').value)\n print(world.get_item('Peasants Timber Price').value)\n print(world.get_item('Peasants Fiber Price').value)\n print(world.get_item('Peasants Tools Price').value)\n print('--------------')\n print(world.get_item('Craftsmen Food Price').value)\n print(world.get_item('Craftsmen Timber Price').value)\n print(world.get_item('Craftsmen Fiber Price').value)\n print(world.get_item('Craftsmen Tools Price').value)\n \n \"\"\"\n foo = world.get_item(\"Pops Total\")\n\n a = time.monotonic()\n\n for _ in range(100):\n foo.change_base(ADD, 1)\n world.do_run()\n world.do_update()\n\n print(time.monotonic() - a)\n \"\"\"\n","sub_path":"prototype.py","file_name":"prototype.py","file_ext":"py","file_size_in_byte":24846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"307590774","text":"#!/usr/bin/env python\nfrom spt3g import core, maps\nimport numpy as np\n\na = core.quat(2,3,4,5)\na2 = core.quat(2,1,8,5)\nb = core.G3VectorQuat([a, a**2, 2*a, a2])\n\nassert(np.isclose(maps.quat_to_ang(a), maps.c_quat_to_ang_(a)).all())\nassert(np.isclose(maps.quat_to_ang(b), np.asarray([maps.c_quat_to_ang_(x) for x in b]).transpose()).all())\n\nangle = (.3, 0.4)\nangles = ((.3, 0.8), (0.4, 0.2))\n\nassert(maps.ang_to_quat(angle[0], angle[1]) == maps.c_ang_to_quat_(angle[0], angle[1]))\nalpha, delta = list(zip(angles[0], angles[1]))\nassert((np.asarray(maps.ang_to_quat(alpha, delta)) == np.asarray(core.G3VectorQuat([maps.c_ang_to_quat_(a[0], a[1]) for a in angles]))).all())\n\nassert(np.isclose(maps.quat_to_ang(maps.c_ang_to_quat_(angle[0], angle[1])), angle).all())\n\n","sub_path":"maps/tests/quatangtest.py","file_name":"quatangtest.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"546402880","text":"import unittest\nfrom flask_app import create_app\n\n\nclass TestHelloWorld(unittest.TestCase):\n \"\"\"Tests the hello_world method.\"\"\"\n def setUp(self):\n self.client = create_app({'TESTING': 
True}).test_client()\n\n def test_hello_world(self):\n string = self.client.get('/').data\n\n assert string == b'Hello, world!'\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_hello_world.py","file_name":"test_hello_world.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"70256716","text":"import App\ndef CreateAI(pShip):\n\t#########################################\n\t# Creating PlainAI AttackedScript at (130, 200)\n\tpAttackedScript = App.PlainAI_Create(pShip, \"AttackedScript\")\n\tpAttackedScript.SetScriptModule(\"RunScript\")\n\tpAttackedScript.SetInterruptable(1)\n\tpScript = pAttackedScript.GetScriptInstance()\n\tpScript.SetScriptModule(\"Maelstrom.Episode3.E3M4.E3M4\")\n\tpScript.SetFunction(\"ShouldIgnoreAttack\")\n\t# Done creating PlainAI AttackedScript\n\t#########################################\n\t#########################################\n\t# Creating PlainAI WarpNowhere at (229, 281)\n\tpWarpNowhere = App.PlainAI_Create(pShip, \"WarpNowhere\")\n\tpWarpNowhere.SetScriptModule(\"Warp\")\n\tpWarpNowhere.SetInterruptable(1)\n\t# Done creating PlainAI WarpNowhere\n\t#########################################\n\t#########################################\n\t# Creating ConditionalAI IfDamaged_2 at (228, 243)\n\t## Conditions:\n\t#### Condition HullLow\n\tpHullLow = App.ConditionScript_Create(\"Conditions.ConditionSystemBelow\", \"ConditionSystemBelow\", pShip.GetName (),App.CT_HULL_SUBSYSTEM,.5)\n\t#### Condition PowerSystemLow\n\tpPowerSystemLow = App.ConditionScript_Create(\"Conditions.ConditionSystemBelow\", \"ConditionSystemBelow\", pShip.GetName (), App.CT_POWER_SUBSYSTEM, .5)\n\t#### Condition WarpSystemDamaged\n\tpWarpSystemDamaged = App.ConditionScript_Create(\"Conditions.ConditionSystemBelow\", \"ConditionSystemBelow\", pShip.GetName (), App.CT_WARP_ENGINE_SUBSYSTEM, .5)\n\t## Evaluation function:\n\tdef EvalFunc(bHullLow, bPowerSystemLow, bWarpSystemDamaged):\n\t\tACTIVE = App.ArtificialIntelligence.US_ACTIVE\n\t\tDORMANT = App.ArtificialIntelligence.US_DORMANT\n\t\tDONE = App.ArtificialIntelligence.US_DONE\n\t\tif (bHullLow or bPowerSystemLow or bWarpSystemDamaged):\n\t\t\treturn ACTIVE\n\t\telse:\n\t\t\treturn DORMANT\n\t## The ConditionalAI:\n\tpIfDamaged_2 = App.ConditionalAI_Create(pShip, \"IfDamaged_2\")\n\tpIfDamaged_2.SetInterruptable(1)\n\tpIfDamaged_2.SetContainedAI(pWarpNowhere)\n\tpIfDamaged_2.AddCondition(pHullLow)\n\tpIfDamaged_2.AddCondition(pPowerSystemLow)\n\tpIfDamaged_2.AddCondition(pWarpSystemDamaged)\n\tpIfDamaged_2.SetEvaluationFunction(EvalFunc)\n\t# Done creating ConditionalAI IfDamaged_2\n\t#########################################\n\t#########################################\n\t# Creating CompoundAI AttackPlayer at (318, 244)\n\timport AI.Compound.BasicAttack\n\tpAttackPlayer = AI.Compound.BasicAttack.CreateAI(pShip, \"player\", Difficulty = 0.3, SmartShields = 0, WarpOutBeforeDying = 1)\n\t# Done creating CompoundAI AttackPlayer\n\t#########################################\n\t#########################################\n\t# Creating CompoundAI AttackNagus at (406, 243)\n\timport AI.Compound.BasicAttack\n\tpAttackNagus = AI.Compound.BasicAttack.CreateAI(pShip, \"Krayvis\", Difficulty = 0.34, SmartShields = 0, WarpOutBeforeDying = 1)\n\t# Done creating CompoundAI AttackNagus\n\t#########################################\n\t#########################################\n\t# Creating PriorityListAI PriorityList at (227, 
167)\n\tpPriorityList = App.PriorityListAI_Create(pShip, \"PriorityList\")\n\tpPriorityList.SetInterruptable(1)\n\t# SeqBlock is at (239, 207)\n\tpPriorityList.AddAI(pIfDamaged_2, 1)\n\tpPriorityList.AddAI(pAttackPlayer, 2)\n\tpPriorityList.AddAI(pAttackNagus, 3)\n\t# Done creating PriorityListAI PriorityList\n\t#########################################\n\t#########################################\n\t# Creating SequenceAI Sequence at (130, 136)\n\tpSequence = App.SequenceAI_Create(pShip, \"Sequence\")\n\tpSequence.SetInterruptable(1)\n\tpSequence.SetLoopCount(1)\n\tpSequence.SetResetIfInterrupted(1)\n\tpSequence.SetDoubleCheckAllDone(0)\n\tpSequence.SetSkipDormant(0)\n\t# SeqBlock is at (158, 176)\n\tpSequence.AddAI(pAttackedScript)\n\tpSequence.AddAI(pPriorityList)\n\t# Done creating SequenceAI Sequence\n\t#########################################\n\t#########################################\n\t# Creating ConditionalAI IfAttackedPlayer at (129, 97)\n\t## Conditions:\n\t#### Condition AttackedPlayer\n\tpAttackedPlayer = App.ConditionScript_Create(\"Conditions.ConditionAttacked\", \"ConditionAttacked\", \"player\", 0.0, 0.0, 0.0)\n\t## Evaluation function:\n\tdef EvalFunc(bAttackedPlayer):\n\t\tACTIVE = App.ArtificialIntelligence.US_ACTIVE\n\t\tDORMANT = App.ArtificialIntelligence.US_DORMANT\n\t\tDONE = App.ArtificialIntelligence.US_DONE\n\t\tif (bAttackedPlayer):\n\t\t\treturn ACTIVE\n\t\telse:\n\t\t\treturn DORMANT\n\t## The ConditionalAI:\n\tpIfAttackedPlayer = App.ConditionalAI_Create(pShip, \"IfAttackedPlayer\")\n\tpIfAttackedPlayer.SetInterruptable(1)\n\tpIfAttackedPlayer.SetContainedAI(pSequence)\n\tpIfAttackedPlayer.AddCondition(pAttackedPlayer)\n\tpIfAttackedPlayer.SetEvaluationFunction(EvalFunc)\n\t# Done creating ConditionalAI IfAttackedPlayer\n\t#########################################\n\t#########################################\n\t# Creating CompoundAI AttackPlayer_2 at (217, 97)\n\timport AI.Compound.BasicAttack\n\tpAttackPlayer_2 = AI.Compound.BasicAttack.CreateAI(pShip, \"player\", Difficulty = 0.5, ChooseSubsystemTargets = 0, SmartShields = 0, WarpOutBeforeDying = 1)\n\t# Done creating CompoundAI AttackPlayer_2\n\t#########################################\n\t#########################################\n\t# Creating PriorityListAI PriorityList_2 at (129, 25)\n\tpPriorityList_2 = App.PriorityListAI_Create(pShip, \"PriorityList_2\")\n\tpPriorityList_2.SetInterruptable(1)\n\t# SeqBlock is at (153, 65)\n\tpPriorityList_2.AddAI(pIfAttackedPlayer, 1)\n\tpPriorityList_2.AddAI(pAttackPlayer_2, 2)\n\t# Done creating PriorityListAI PriorityList_2\n\t#########################################\n\t#########################################\n\t# Creating PreprocessingAI AvoidObstacles at (40, 26)\n\t## Setup:\n\timport AI.Preprocessors\n\tpScript = AI.Preprocessors.AvoidObstacles()\n\t## The PreprocessingAI:\n\tpAvoidObstacles = App.PreprocessingAI_Create(pShip, \"AvoidObstacles\")\n\tpAvoidObstacles.SetInterruptable(1)\n\tpAvoidObstacles.SetPreprocessingMethod(pScript, \"Update\")\n\tpAvoidObstacles.SetContainedAI(pPriorityList_2)\n\t# Done creating PreprocessingAI AvoidObstacles\n\t#########################################\n\treturn pAvoidObstacles\n","sub_path":"scripts/Maelstrom/Episode3/E3M4/E3M4GalorChaseAI2.py","file_name":"E3M4GalorChaseAI2.py","file_ext":"py","file_size_in_byte":6004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"598689465","text":"#coding=utf-8\n\n#结束条件,遍历到最后一个元素截止\n#两重循环\n#外部循环遍历每一个元素\n#内部循环是一次与前面已经排序过的元素进行比较\ndef insertSort(list):\n for i in range(1, len(list)):\n temp = list[i]\n j = i-1 #这里必须要取j=i-1\n while j>=0 and temp.\nimport os\nimport cv2\n\nimport numpy as np\nfrom skimage.transform import AffineTransform\nimport tables\nfrom glob import glob\nfrom os.path import join as pjoin\nfrom datetime import datetime\nfrom skimage.transform import warp\nfrom multiprocessing import Pool, cpu_count\nfrom functools import partial\nfrom scipy.interpolate import interp1d\nfrom scipy.sparse import load_npz, issparse,csr_matrix\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport h5py\nimport sys\n\nimport Preprocessing_Utils\n\ncv2.setNumThreads(10)\n\n\ndef get_blue_file(base_directory):\n file_list = os.listdir(base_directory)\n for file in file_list:\n if \"Blue_Data\" in file:\n return base_directory + \"/\" + file\n\ndef get_violet_file(base_directory):\n file_list = os.listdir(base_directory)\n for file in file_list:\n if \"Violet_Data\" in file:\n return base_directory + \"/\" + file\n\ndef parinit():\n import os\n os.environ['MKL_NUM_THREADS'] = \"1\"\n os.environ['OMP_NUM_THREADS'] = \"1\"\n\n\ndef runpar(f,X,nprocesses = None,**kwargs):\n '''\n res = runpar(function, # function to execute\n data, # data to be passed to the function\n nprocesses = None, # defaults to the number of cores on the machine\n **kwargs) # additional arguments passed to the function (dictionary)\n '''\n if nprocesses is None:\n nprocesses = cpu_count()\n with Pool(initializer = parinit, processes=nprocesses) as pool:\n res = pool.map(partial(f,**kwargs),X)\n pool.join()\n return res\n\n\n\n\ndef findTransformECC(template, dst, M, warp_mode, criteria, inputMask, gaussFiltSize):\n return cv2.findTransformECC(template, dst,\n M, warp_mode,\n criteria,\n inputMask=inputMask,\n gaussFiltSize=gaussFiltSize)\n\n\ncv2ver = cv2.__version__.split('.')\nif (int(cv2ver[0]) == 3) and (int(cv2ver[1]) <= 4):\n if int(cv2ver[2]) <= 5:\n def findTransformECC(template,\n dst,\n M,\n warp_mode,\n criteria,\n inputMask,\n gaussFiltSize):\n return cv2.findTransformECC(template, dst,\n M, warp_mode,\n criteria,\n inputMask=inputMask)\nelif (int(cv2ver[0]) == 4) and (int(cv2ver[1]) <= 1):\n # gaussFiltSize is a mandatory input on opencv 4.4 but not 4.1\n def findTransformECC(template,\n dst,\n M,\n warp_mode,\n criteria,\n inputMask,\n gaussFiltSize):\n return cv2.findTransformECC(template, dst,\n M, warp_mode,\n criteria,\n inputMask=inputMask)\n\n\ndef registration_ecc(frame, template,\n niter=1000,\n eps0=1e-3,\n warp_mode=cv2.MOTION_EUCLIDEAN,\n prepare=True,\n gaussian_filter=1,\n hann=None,\n **kwargs):\n h, w = template.shape\n if hann is None:\n hann = cv2.createHanningWindow((w, h), cv2.CV_32FC1)\n hann = (hann * 255).astype('uint8')\n dst = frame.astype('float32')\n M = np.eye(2, 3, dtype=np.float32)\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,\n niter, eps0)\n (res, M) = findTransformECC(template, dst,\n M, warp_mode,\n criteria,\n inputMask=hann, gaussFiltSize=gaussian_filter)\n\n dst = cv2.warpAffine(frame, M, (w, h), flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP);\n\n return M, np.clip(dst, 0, (2 ** 16 - 1)).astype('uint16')\n\n\ndef _xy_rot_from_affine(affines):\n '''\n helper function to parse affine parameters from ECC\n '''\n xy = []\n rot = []\n for r in affines:\n M = np.vstack([r, np.array([0, 0, 1])])\n M = AffineTransform(M)\n xy.append(M.translation)\n rot.append(M.rotation)\n 
rot = np.rad2deg(np.array(rot))\n xy = np.array(xy)\n return xy, rot\n\n\ndef registration_upsample(frame, template):\n h, w = frame.shape\n dst = frame.astype('float32')\n (xs, ys), sf = cv2.phaseCorrelate(dst, template.astype('float32'))\n M = np.float32([[1, 0, xs], [0, 1, ys]])\n dst = cv2.warpAffine(dst, M, (w, h))\n return (xs, ys), (np.clip(dst, 0, (2 ** 16 - 1))).astype('uint16')\n\n\ndef load_generous_mask(home_directory):\n\n # Loads the mask for a video, returns a list of which pixels are included, as well as the original image height and width\n mask = np.load(home_directory + \"/Generous_Mask.npy\")\n\n image_height = np.shape(mask)[0]\n image_width = np.shape(mask)[1]\n\n mask = np.where(mask>0.1, 1, 0)\n mask = mask.astype(int)\n flat_mask = np.ndarray.flatten(mask)\n indicies = np.argwhere(flat_mask)\n indicies = np.ndarray.astype(indicies, int)\n indicies = np.ndarray.flatten(indicies)\n\n return indicies, image_height, image_width\n\n\ndef _register_multichannel_stack(frames, templates, mode='2d',\n niter=100,\n eps0=1e-3,\n warp_mode=cv2.MOTION_EUCLIDEAN): # mode 2d\n\n nframes, nchannels, h, w = frames.shape\n if mode == 'ecc':\n hann = cv2.createHanningWindow((w, h), cv2.CV_32FC1)\n hann = (hann * 255).astype('uint8')\n\n ys = np.zeros((nframes, nchannels), dtype=np.float32)\n xs = np.zeros((nframes, nchannels), dtype=np.float32)\n rot = np.zeros((nframes, nchannels), dtype=np.float32)\n stack = np.zeros_like(frames, dtype='uint16')\n\n for ichan in range(nchannels):\n chunk = frames[:, ichan].squeeze()\n if mode == '2d':\n res = runpar(registration_upsample, chunk,\n template=templates[ichan])\n ys[:, ichan] = np.array([r[0][1] for r in res], dtype='float32')\n xs[:, ichan] = np.array([r[0][0] for r in res], dtype='float32')\n\n elif mode == 'ecc':\n res = runpar(registration_ecc, chunk,\n template=templates[ichan],\n hann=hann,\n niter=niter,\n eps0=eps0,\n warp_mode=warp_mode)\n\n\n xy, rots = _xy_rot_from_affine([r[0] for r in res])\n ys[:, ichan] = xy[:, 1]\n xs[:, ichan] = xy[:, 0]\n rot[:, ichan] = rots\n stack[:, ichan, :, :] = np.stack([r[1] for r in res])\n return (xs, ys, rot), stack\n\n\n\n\n\n\ndef get_reference_images(blue_matrix, violet_matrix, reference_size=60, start=1000, image_height=600, image_width=608, mode='ecc'):\n\n # Extract Chunk Data\n combined_data = np.array([blue_matrix[:, start:start + reference_size], violet_matrix[:, start:start + reference_size]])\n\n # Reshape Combined Data\n channels, pixels, frames = np.shape(combined_data)\n combined_data = combined_data.reshape(channels, image_height, image_width, frames)\n combined_data = np.moveaxis(combined_data, [0, 1, 2, 3], [1, 2, 3, 0])\n\n # Align Frames Within This Chunk\n refs = combined_data[0].astype('float32')\n _, refs = _register_multichannel_stack(combined_data, refs, mode=mode)\n\n # Take The Mean\n refs = np.mean(refs, axis=0).astype('float32')\n\n return refs\n\n\n\n\ndef plot_registration_shifts(base_directory):\n print(\"Plotting registration\", base_directory)\n\n # Load Data\n x_shifts = np.load(os.path.join(base_directory, \"X_Shifts.npy\"))\n y_shifts = np.load(os.path.join(base_directory, \"Y_Shifts.npy\"))\n r_shifts = np.load(os.path.join(base_directory, \"R_Shifts.npy\"))\n\n # Create Figure\n figure_1 = plt.figure()\n rows = 2\n columns = 1\n translation_axis = figure_1.add_subplot(rows, columns, 1)\n rotation_axis = figure_1.add_subplot(rows, columns, 2)\n\n # Plot Data\n translation_axis.plot(x_shifts, c='b')\n translation_axis.plot(y_shifts, c='r')\n 
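# Translations (x in blue, y in red) go on the top axis; rotation in\n    # degrees goes on the bottom axis in green.\n    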
rotation_axis.plot(r_shifts, c='g')\n\n    # Save Figure\n    plt.savefig(os.path.join(base_directory, \"Motion_Correction_Shifts.png\"))\n    plt.close()\n\n\ndef perform_motion_correction(base_directory, output_directory, output_file=\"Motion_Corrected_Mask_Data.hdf5\"):\n\n    # Get Blue and Violet Files\n    blue_file = get_blue_file(base_directory)\n    violet_file = get_violet_file(base_directory)\n\n    # Load Mask\n    indicies, image_height, image_width = load_generous_mask(output_directory)\n\n\n    # Load Data\n    blue_data_container = h5py.File(blue_file, 'r')\n    violet_data_container = h5py.File(violet_file, 'r')\n    blue_data = blue_data_container[\"Data\"]\n    violet_data = violet_data_container[\"Data\"]\n\n    # Get Reference Images\n    reference_images = get_reference_images(blue_data, violet_data)\n\n    # Get Chunk Structure\n    number_of_pixels, number_of_frames = np.shape(blue_data)\n    number_of_active_pixels = len(indicies)\n    preferred_chunk_size = 10000\n    number_of_chunks, chunk_sizes, chunk_starts, chunk_stops = Preprocessing_Utils.get_chunk_structure(preferred_chunk_size, number_of_frames)\n\n    x_shifts = []\n    y_shifts = []\n    r_shifts = []\n\n    # Process Data\n    #file_cache_size = 16561440000\n    with h5py.File(os.path.join(base_directory, output_file), \"w\") as f:\n        corrected_blue_dataset = f.create_dataset(\"Blue_Data\", (number_of_active_pixels, number_of_frames), dtype=np.uint16, chunks=True, compression=\"gzip\")\n        corrected_violet_dataset = f.create_dataset(\"Violet_Data\", (number_of_active_pixels, number_of_frames), dtype=np.uint16, chunks=True, compression=\"gzip\")\n\n        for chunk_index in range(number_of_chunks):\n            print(\"Chunk Index\", chunk_index, \" of \", number_of_chunks, \"Time: \", datetime.now())\n            chunk_start = int(chunk_starts[chunk_index])\n            chunk_stop = int(chunk_stops[chunk_index])\n            chunk_size = chunk_sizes[chunk_index]\n            print(\"Chunk Start\", chunk_start)\n            print(\"Chunk Stop\", chunk_stop)\n\n            # Load Chunk Data\n            combined_data = np.array([blue_data[:, chunk_start:chunk_stop], violet_data[:, chunk_start:chunk_stop]])\n\n            # Reshape Combined Data\n            channels, pixels, frames = np.shape(combined_data)\n            combined_data = combined_data.reshape(channels, image_height, image_width, frames)\n            combined_data = np.moveaxis(combined_data, [0, 1, 2, 3], [1, 2, 3, 0])\n\n            # Perform Motion Correction\n            (xs, ys, rot), corrected = _register_multichannel_stack(combined_data, reference_images, mode='ecc')\n\n            # Record The Shifts\n            x_shifts.append(xs)\n            y_shifts.append(ys)\n            r_shifts.append(rot)\n\n            # Free The Chunk, Then Reshape The Corrected Data\n            combined_data = None\n\n            corrected_blue = corrected[:, 0]\n            corrected_violet = corrected[:, 1]\n\n            # Changed to work with Dylans mouse\n            #corrected_blue = blue_data[:, chunk_start:chunk_stop]\n            #corrected_violet = violet_data[:, chunk_start:chunk_stop]\n\n            corrected_blue = np.reshape(corrected_blue, (chunk_size, image_height * image_width))\n            corrected_violet = np.reshape(corrected_violet, (chunk_size, image_height * image_width))\n\n            # Select Only The Masked Pixels\n            corrected_blue = corrected_blue[:, indicies]\n            corrected_violet = corrected_violet[:, indicies]\n\n            # Put Back\n            corrected_blue_dataset[:, chunk_start:chunk_stop] = np.transpose(corrected_blue)\n            corrected_violet_dataset[:, chunk_start:chunk_stop] = np.transpose(corrected_violet)\n\n    x_shifts = np.concatenate(x_shifts)\n    y_shifts = np.concatenate(y_shifts)\n    r_shifts = np.concatenate(r_shifts)\n\n    np.save(os.path.join(output_directory, \"X_Shifts.npy\"), x_shifts)\n    np.save(os.path.join(output_directory, 
\"Y_Shifts.npy\"), y_shifts)\n np.save(os.path.join(output_directory, \"R_Shifts.npy\"), r_shifts)\n\n # Plot Registration Shifts\n plot_registration_shifts(output_directory)","sub_path":"Preprocessing/Motion_Correction.py","file_name":"Motion_Correction.py","file_ext":"py","file_size_in_byte":13197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"194016031","text":"\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n'''\nCreated on Mar 3, 2020\n\n@author: ballance\n'''\n\nfrom unittest import TestCase\nfrom antlr4.InputStream import InputStream\nfrom pssparser.cu_parser import CUParser\nfrom _io import StringIO\n\nclass TestDefaultConstraint(TestCase):\n\n def _runTest(self, text, name):\n input_stream = InputStream(text)\n parser = CUParser(input_stream, name)\n cu = parser.parse()\n \n if len(cu.markers) > 0:\n print(\"Test Failed:\")\n in_reader = StringIO(text)\n i=1\n while True:\n line = in_reader.readline()\n if line == \"\":\n break\n line = line[:-1]\n print(\"%3d: %s\" % (i, line))\n i+=1\n \n self.assertEqual(len(cu.markers), 0, \"Errors\") \n \n def test_example_1(self):\n text = \"\"\"\n struct my_struct {\n rand int in [0..3] attr1;\n constraint default attr1 == 0; // (1)\n \n rand int in [0..3] attr2;\n constraint attr1 < attr2; // (2)\n };\n \n //\n component container {\n //\n\n action my_action {\n rand my_struct s1;\n \n rand my_struct s2;\n constraint default s2.attr1 == 2; // (3)\n \n rand my_struct s3;\n constraint default disable s3.attr1; // (4)\n constraint s3.attr1 > 0; // (5)\n };\n\n //\n }\n //\n \"\"\"\n\n self._runTest(text, \"test_example_1\")","sub_path":"unit/test_default_constraints.py","file_name":"test_default_constraints.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"513832661","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.db.models import Q\n\nfrom .forms import PostModelForm\nfrom .models import PostModel\nfrom datetime import datetime, date\n\n\ndef temp_test(request):\n template = 'blog/test-view.html'\n projects_list = [\"Market Place\", \"Bonds Data\", \"PMService\", \"Algorithm\"]\n context = {\n \"name\": \"Bahmani\",\n \"lucky\": 75,\n \"buffer\": 35,\n \"bdate\": date(1985,6,28),\n \"objects_list\":projects_list,\n \"title\": \"Ava\"\n }\n messages.success(request, \"Test\", fail_silently=True, extra_tags='test')\n return render(request, template, context)\n\n\ndef post_model_create_view(request):\n # if 
request.method == \"POST\":\n # print(request.POST)\n # form = PostModelForm(request.POST)\n # if form.is_valid():\n # form.save(commit=False)\n # print(form.cleaned_data)\n\n form = PostModelForm(request.POST or None)\n context ={\n \"form\": form\n }\n if form.is_valid():\n obj = form.save(commit=False)\n print(form.cleaned_data)\n obj.save()\n messages.success(request, \"Created a new blog post\")\n context = {\n \"form\": PostModelForm()\n }\n return HttpResponseRedirect(f\"/blog/{obj.id}\")\n\n template = \"blog/create-view.html\"\n return render(request, template, context)\n\n\ndef post_model_delete_view(request, id):\n target_obj = get_object_or_404(PostModel, id=id)\n if request.method == 'POST':\n target_obj.delete()\n #messages.success(request, \"Post deleted\")\n return HttpResponseRedirect(\"/blog/\")\n\n context = {\n \"object\": target_obj\n }\n template = \"blog/delete-view.html\"\n return render(request, template, context)\n\n\ndef post_model_update_view(request, id):\n target_obj = get_object_or_404(PostModel, id=id)\n form = PostModelForm(request.POST or None, instance=target_obj)\n context = {\n \"object\": target_obj,\n \"form\": form\n }\n if form.is_valid():\n obj = form.save(commit=False)\n print(\"Form updated successfully\")\n obj.save()\n messages.success(request, f\"Updated post with id {id} successfully\")\n context = {\n \"form\": PostModelForm()\n }\n return HttpResponseRedirect(f\"/blog/{id}\")\n\n template = \"blog/update-view.html\"\n\n return render(request, template, context)\ndef post_model_detail_view(request, id):\n # try:\n # obj = PostModel.objects.get(id=id)\n # except ObjectDoesNotExist:\n # obj = f\"No post with id {id}\"\n obj = get_object_or_404(PostModel, id=id)\n context = {\n \"object\": obj\n }\n template = \"blog/detail-view.html\"\n return render(request, template, context)\n\n\n#@login_required()\ndef post_model_list_view(request):\n print(request.GET)\n query = request.GET.get(\"q\")\n qs = PostModel.objects.all()\n if not(query is None):\n qs = qs.filter(\n Q(title__icontains=query) |\n Q(content__icontains=query)\n )\n\n print(request.user)\n\n print(qs)\n\n context = {\n \"object_list\": qs,\n \"name_list\": ['Bahman', 'Salehi']\n }\n if request.user.is_authenticated:\n template = 'blog/list-view.html'\n context['user_status'] = 'Authenticated'\n else:\n template = 'blog/list-view-public.html'\n context['user_status'] = 'public'\n #raise Http404\n\n return render(request, template, context)","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"413118872","text":"print(\"Ingrese un numero para determinar si este es par o impar (Ingrese 0 o un caracter no numerico para salir)\")\n#Se usa un bucle para que el programa no necesariamente se cierre al finalizar el proceso\nwhile(True):\n #Se especta un valor float(decimal) o traduce uno int(entero) a float, saliendo así del programa al ser ingresado un caracter diferente(letras,simbolos)\n numero = float(input(\"\\n>>\"))\n #Pues no es objeto de prueba el 0 se ha definido para terminar la vida del programa (cerrar el bucle)\n if numero == 0 :\n print(\"El número es cero\\nEl programa se cerrará\")\n break\n #División modular (división por residuo)\n elif numero % 2 == 0 :\n print(\"El número es uno par\")\n else :\n print(\"El número es uno impar\")\n \n 
\n","sub_path":"485.py","file_name":"485.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"226107761","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nT = np.loadtxt('Trasmissione.CSV')\nR = np.loadtxt('Riflessione.CSV')\n\nlenT = len(T[:,1])\nlenR = len(R[:,1])\n\nlength = min(lenR,lenT) - 1500\nshift = 0\n\nmstomin = 1e-4/6.\nTtime = T[:length,1]*mstomin\nRtime = R[:length,1]*mstomin\nTint = T[:length,0]\nRint = R[shift:length+shift,0]\n\n#plt.plot(Ttime, Tint)\n#plt.plot(Rtime, Rint*96)\n#plt.plot(Rtime, Rint+Tint)\n#plt.show()\n\nplt.plot(Rtime, Rint/Tint*96.)\nplt.xlabel('Time [min]')\nplt.ylabel('Reflected/Transmitted * 96')\n#plt.show()\nplt.savefig('Reflected over Transmitted')","sub_path":"source status/SMFTest.py","file_name":"SMFTest.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"382265089","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.9.1+dev\n# kernelspec:\n# display_name: Python [conda env:generic_expression] *\n# language: python\n# name: conda-env-generic_expression-py\n# ---\n\n# # Expression of Crow data\n#\n# This notebook tests the hypothesis that the RNA-seq generic genes are those that are not well captured on microarray technology. [Zhao et. al.](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3894192/) found that one possible reason for differences in differentially expressed genes detected on different platforms is due to how genes are captured. RNA-Seq is more likely to detect the changes at two different conditions for genes with very low expression or very high expression compared to arrays.\n#\n# This data was generated by running `download_Crow_data.R` script that downloads expression data from https://github.com/PavlidisLab/gemmaAPI.R\n\n# +\n# %load_ext autoreload\n# %matplotlib inline\n\nimport os\nimport random\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom ponyo import utils\nfrom generic_expression_patterns_modules import ranking\n\n# +\n# Read in config variables\nbase_dir = os.path.abspath(os.path.join(os.getcwd(), \"../\"))\n\nconfig_filename = os.path.abspath(\n os.path.join(base_dir, \"configs\", \"config_human_general.tsv\")\n)\n\nparams = utils.read_config(config_filename)\nlocal_dir = params[\"local_dir\"]\ndataset_name = params[\"dataset_name\"]\nproject_id = params[\"project_id\"]\ncol_to_rank_genes = params[\"rank_genes_by\"]\nmapped_compendium_filename = params[\"mapped_compendium_filename\"]\n\nif_single_experiment = False\n# -\n\n# Read in recount2 expression compendium\nrecount2_expression = pd.read_csv(\n mapped_compendium_filename, sep=\"\\t\", index_col=0, header=0\n).T\n\n# ## Format Crow expression data\n#\n# * Include only genes that were used in our original analysis\n\n# Read in Crow expression data\ncrow_expression_filename = os.path.join(local_dir, \"Crow_expression_data_union.tsv\")\ncrow_expression_data = pd.read_csv(\n crow_expression_filename, sep=\"\\t\", index_col=0, header=0\n).T\n\ncrow_expression_data.shape\n\ncrow_expression_data.head()\n\n# +\n# Load gene_summary_filename\ngene_summary_filename = os.path.join(\n base_dir, dataset_name, f\"generic_gene_summary_{project_id}.tsv\"\n)\n\nsummary_gene_ranks = 
pd.read_csv(gene_summary_filename, sep=\"\\t\", index_col=0, header=0)\n# -\n\nsummary_gene_ranks.head()\n\n# +\n# Subset genes\nour_gene_ids = list(summary_gene_ranks.index)\ncrow_gene_ids = list(crow_expression_data.index)\n\nshared_gene_ids = set(crow_gene_ids).intersection(our_gene_ids)\n\nexpression_data = crow_expression_data.loc[shared_gene_ids]\n# -\n\nprint(expression_data.shape)\nexpression_data.head()\n\n# ## (optional) Select gene subset of samples\n#\n# Select samples from a specific experiment to examine local gene expression behavior within a single experiment (local) in addition to across all samples (global)\n#\n# We would actually like to do this for crow data but, we do not have metadata mapping experiment ids to sample ids. So this analysis option is only available for recount2 for now\n\nrecount2_metadata_filename = os.path.join(\n base_dir, dataset_name, \"data\", \"metadata\", \"recount2_metadata.tsv\"\n)\n\n\n# Function scraped from ponyo since we're already using a different version of ponyo in this repo\ndef get_sample_ids_random_experiment(\n metadata_filename, delimiter, experiment_colname, sample_id_colname, rn_seed\n):\n \"\"\"\n Returns sample ids (found in gene expression df) associated with\n a given list of experiment ids (found in the metadata)\n\n Arguments\n ----------\n metadata_filename: str\n Metadata file path. An example metadata file can be found\n here: https://github.com/greenelab/ponyo/blob/master/human_tests/data/metadata/recount2_metadata.tsv\n\n delimiter: str\n Delimiter for metadata file\n\n experiment_colname: str\n Column header that contains the experiment ids\n\n sample_id_colname: str\n Column header that contains sample id that maps expression data\n and metadata\n\n \"\"\"\n random.seed(rn_seed)\n\n # Read in metadata\n metadata = pd.read_csv(metadata_filename, header=0, sep=delimiter, index_col=None)\n\n # Set index column to experiment id column\n metadata.set_index(experiment_colname, inplace=True)\n\n # Select random experiment\n rn_experiment_id = random.choice(list(np.unique(metadata.index)))\n\n # Select samples associated with experiment id\n selected_metadata = metadata.loc[rn_experiment_id]\n sample_ids = list(selected_metadata[sample_id_colname])\n\n return sample_ids\n\n\nif if_single_experiment:\n # Get sample ids for random experiment\n recount2_sample_ids = get_sample_ids_random_experiment(\n recount2_metadata_filename, \"\\t\", \"project\", \"run\", 1\n )\n\n # Subset expression data\n recount2_expression = recount2_expression.loc[recount2_sample_ids]\n\n# ## Get uncorrelated genes\n\n# +\n# Get generic genes identified by Crow et. 
al.\nDE_prior_filename = params[\"reference_gene_filename\"]\nref_gene_col = params[\"reference_gene_name_col\"]\nref_rank_col = params[\"reference_rank_col\"]\n\nfigure_filename = f\"gene_ranking_{col_to_rank_genes}_tmp.svg\"\n\ncorr, shared_ranking = ranking.compare_gene_ranking(\n    summary_gene_ranks,\n    DE_prior_filename,\n    ref_gene_col,\n    ref_rank_col,\n    figure_filename,\n)\n# -\n\nshared_ranking.head()\n\n# +\n# Get uncorrelated gene ids\nuncorrelated_ranking = shared_ranking[\n    (shared_ranking[\"Percentile (simulated)\"] > 80)\n    & (shared_ranking[\"DE_Prior_Rank\"] < 20)\n]\n\nuncorrelated_genes = uncorrelated_ranking[\"Gene_Name\"]\nprint(len(uncorrelated_genes))\n\n# +\n# Get correlated gene ids\ncorrelated_ranking = shared_ranking[\n    (shared_ranking[\"Percentile (simulated)\"] > 80)\n    & (shared_ranking[\"DE_Prior_Rank\"] > 80)\n]\n\ncorrelated_genes = correlated_ranking[\"Gene_Name\"]\nprint(len(correlated_genes))\n# -\n\n# Save uncorrelated genes\nuncorrelated_genes.to_csv(\"uncorrelated_genes.tsv\", sep=\"\\t\")\n\n# ## Plot average expression\n\n# Get average expression of SOPHIE trained recount2 dataset\nrecount2_expression_mean = recount2_expression.mean(axis=1)\n\nrecount2_expression_mean.head()\n\n# Get average expression of Crow dataset\ncrow_expression_mean = crow_expression_data.mean(axis=1)\n\ncrow_expression_mean.head()\n\n# Check that we are selecting the correct genes\nuncorrelated_genes = list(uncorrelated_genes.values)\nuncorrelated_genes[0:5]\n\nrecount2_expression_mean[uncorrelated_genes].head()\n\ncrow_expression_mean.reindex(uncorrelated_genes).head()\n\nrecount2_expression_mean.head()\n\n# +\n# Format df for plotting\nrecount2_expression_mean_toplot = pd.DataFrame(\n    data={\n        \"All genes\": np.log10(recount2_expression_mean),\n        \"Common in RNA-seq and array\": np.log10(\n            recount2_expression_mean[correlated_genes]\n        ),\n        \"Common in only RNA-seq\": np.log10(\n            recount2_expression_mean[uncorrelated_genes]\n        ),\n    }\n)\n\n\nrecount2_expression_mean_toplot.head()\n\n# +\n# Violin plot of average recount2 expression highlighting uncorrelated genes\nprint(\n    f\"Number of uncorrelated gene data available: {len(recount2_expression_mean[uncorrelated_genes])}\"\n)\nf = sns.violinplot(\n    data=recount2_expression_mean_toplot,\n    palette=[\"lightgrey\", \"#2c7fb8\", \"#add8e6\"],\n    orient=\"h\",\n)\nplt.xticks(fontsize=12)\nplt.yticks(fontsize=16)\nplt.xlim(-2, 6)\n\n# Make axis thicker\nfor _, s in f.spines.items():\n    s.set_linewidth(1.5)\n\nf.set_title(\"Average recount2 expression\", fontsize=18)\nf.set_xlabel(r\"Log$_{10}$ (average expression)\", fontsize=16)\n# -\n\nf.get_figure().savefig(\n    \"recount2_expression_dist_gene_groups_highlight.svg\",\n    format=\"svg\",\n    bbox_inches=\"tight\",\n    transparent=True,\n    pad_inches=0,\n    dpi=300,\n)\n\n# +\n# Format df for plotting\ncrow_expression_mean_toplot = pd.DataFrame(\n    data={\n        \"All genes\": np.log10(crow_expression_mean),\n        \"Common in RNA-seq and array\": np.log10(crow_expression_mean[correlated_genes]),\n        \"Common only in RNA-seq\": np.log10(crow_expression_mean[uncorrelated_genes]),\n    }\n)\n\n\ncrow_expression_mean_toplot.head()\n\n# +\n# Violin plot of average array expression highlighting uncorrelated genes\nprint(\n    f\"Number of uncorrelated gene data available: {len(crow_expression_mean.reindex(uncorrelated_genes))}\"\n)\ng = sns.violinplot(\n    data=crow_expression_mean_toplot,\n    palette=[\"lightgrey\", \"#2c7fb8\", \"#add8e6\"],\n    orient=\"h\",\n)\nplt.xticks(fontsize=12)\nplt.yticks(fontsize=16)\nplt.xlim(-1, 4)\n\n# 
Make axis thicker\nfor _, s in g.spines.items():\n s.set_linewidth(1.5)\n\ng.set_title(\"Average array (Crow et al.) expression\", fontsize=18)\ng.set_xlabel(r\"Log$_{10}$ (average expression)\", fontsize=16)\n# -\n\ng.get_figure().savefig(\n \"array_expression_dist_gene_groups_highlight.svg\",\n format=\"svg\",\n bbox_inches=\"tight\",\n transparent=True,\n pad_inches=0,\n dpi=300,\n)\n\n# **Takeaway:**\n# * Our hypothesis is that these RNA-seq generic genes are those that are not well captured on microarray technology.\n# * Based on the distribution of the array data, it looks like these genes are fairly lowly expressed, but based on the density of the violin plot there appear to be many genes that have a similar range of expression. So these RNA-seq generic genes are as well captured as RNA-seq/array generic genes.\n# * This hypothesis does not seem to hold\n\n# **Other thoughts:**\n#\n# Looking to characterize _who_ these RNA-seq generic genes are, we used https://academic.oup.com/nar/article/48/D1/D174/5588346 to lookup the RNA-seq generic genes to determine if they have 3' end processing (i.e. polyadenylation sites)\n#\n# Manually looking up individual genes (since there doesn't seem to be a way to do this in batch), we found that most genes have at least one polyadenylated site.\n","sub_path":"explore_RNAseq_only_generic_genes/1b_expression_analysis_Crow.py","file_name":"1b_expression_analysis_Crow.py","file_ext":"py","file_size_in_byte":9927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"117565683","text":"import csv\nfrom django.http import HttpResponse\nimport datetime\n\n\n\nclass DataFile:\n datafiles_folder_path = \"datafiles/\"\n def __init__(self, name_ref):\n self.name_ref = name_ref\n self.file_ref = self.datafiles_folder_path + name_ref + \".csv\"\n\n\n\nclass DataSet:\n def __init__(self, name_ref, time_set, data_set):\n self.name_ref = name_ref\n self.time = DataFile(time_set)\n self.data = DataFile(data_set)\n\n\nclass WriterCSV:\n def __init__(self, data_set_path):\n self.time_path = \"datafiles/{0}_time.csv\".format(data_set_path)\n self.data_path = \"datafiles/{0}_data.csv\".format(data_set_path)\n\n def write(self,time,data):\n time_ref = datetime.datetime.now()\n with open(self.time_path, 'a', newline='') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n spamwriter.writerow([time_ref,time])\n\n with open(self.data_path, 'a', newline='') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n spamwriter.writerow([time_ref,data])\n\n# class ReaderCSV:\n #\n # # def __init__(self, data_set_path):\n # # # self.time_path = \"datafiles/{0}_time.csv\".format(data_set_path)\n # # # self.time_path = \"datafiles/YHOO_time.csv\"\n # # # self.data_path = \"datafiles/{0}_data.csv\".format(data_set_path)\n # # # self.data_path = \"datafiles/YHOO_data.csv\"\n #\n # def read(self):\n #\n # with open(self.time_path, 'r', newline='') as csvfile:\n # spamwriter = csv.reader(csvfile, delimiter=' ',\n # quotechar='|', quoting=csv.QUOTE_MINIMAL)\n # timeRows = ()\n # timeCheckRows = ()\n # for row in spamwriter:\n # timeRows = timeRows + (row[1],)\n # timeCheckRows = timeCheckRows + (row[0],)\n #\n #\n # with open(self.data_path, 'r', newline='') as csvfile:\n # spamwriter = csv.reader(csvfile, delimiter=' ',\n # quotechar='|', quoting=csv.QUOTE_MINIMAL)\n # dataRows = ()\n # dataCheckRows = ()\n # for row in 
spamwriter:\n # dataRows = timeRows + (row[1],)\n # dataCheckRows = timeCheckRows + (row[0],)\n #\n # if(timeCheckRows==dataCheckRows):\n # return timeRows, dataRows\n # else:\n # return None\n\n\n\n# from stockMarket.models import ShareModel; YHOO = ShareModel(name_ref = \"YHOO\");\n","sub_path":"mysite/utils/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"483438317","text":"import pipeline\n\nfrom importlib import import_module\nfrom mapreduce import context\nfrom mapreduce import mapper_pipeline\nfrom mapreduce.mapreduce_pipeline import MapreducePipeline\nfrom mapreduce import pipeline_base\nfrom mapreduce.model import MapreduceState\nfrom mapreduce.input_readers import RawDatastoreInputReader, GoogleCloudStorageInputReader\nfrom mapreduce import model\nfrom pipeline.util import for_name\n\nfrom django.utils import six\nfrom djangae.contrib.processing.mapreduce.input_readers import DjangoInputReader\n\nfrom utils import qualname\n\ndef import_callable(dotted_path):\n module_path = dotted_path.rsplit(\".\", 1)[0]\n while module_path:\n try:\n module = import_module(module_path)\n break\n except ImportError:\n module_path = module_path.rsplit(\".\", 1)[0]\n continue\n else:\n raise ImportError(\"Module not found in path: {}\".format(dotted_path))\n\n remainder = dotted_path[len(module_path):].lstrip(\".\")\n remainder_parts = remainder.split(\".\")\n\n func = module\n while remainder_parts:\n next_step = remainder_parts[0]\n if not hasattr(func, next_step):\n raise ImportError(\"Couldn't find {} in module {}\".format(next_step, module))\n func = getattr(func, next_step)\n remainder_parts = remainder_parts[1:]\n\n if not callable(func):\n raise ImportError(\"Specified path is not a callable: {}\".format(dotted_path))\n\n return func\n\nclass MapperPipeline(mapper_pipeline.MapperPipeline):\n\n def finalized(self):\n mapreduce_id = self.outputs.job_id.value\n mapreduce_state = model.MapreduceState.get_by_job_id(mapreduce_id)\n params = mapreduce_state.mapreduce_spec.mapper.params\n finalized_func = params.get('_finalized', None)\n if not finalized_func:\n return None\n finalized_func = for_name(finalized_func)\n return finalized_func(job_name=self.args[0],\n outputs=self.outputs,\n *params.get('args', []),\n **params.get('kwargs', {}))\n\ndef unpacker(obj):\n params = context.get().mapreduce_spec.mapper.params\n handler = import_callable(params[\"func\"])\n yield handler(obj, *params[\"args\"], **params[\"kwargs\"])\n\n\ndef _do_map(\n input_reader, processor_func, finalize_func, params,\n _shards, _output_writer, _output_writer_kwargs, _job_name, _queue_name,\n *processor_args, **processor_kwargs):\n\n start_pipeline = processor_kwargs.pop('start_pipeline', True)\n\n handler_spec = qualname(unpacker)\n handler_params = {\n \"func\": qualname(processor_func) if callable(processor_func) else processor_func,\n \"args\": processor_args,\n \"kwargs\": processor_kwargs,\n \"_finalized\": qualname(finalize_func) if callable(finalize_func) else finalize_func\n }\n\n handler_params.update(params)\n\n new_pipeline = MapperPipeline(\n _job_name,\n handler_spec=handler_spec,\n input_reader_spec=qualname(input_reader),\n output_writer_spec=qualname(_output_writer) if _output_writer else None,\n params=handler_params,\n shards=_shards\n )\n\n if start_pipeline:\n new_pipeline.start(queue_name=_queue_name or 'default')\n\n return new_pipeline\n\ndef extract_options(kwargs, 
additional=None):\n VALID_OPTIONS = {\n \"_shards\",\n \"_output_writer\",\n \"_output_writer_kwargs\",\n \"_job_name\",\n \"_queue_name\",\n }\n\n options = {}\n\n for option in VALID_OPTIONS.union(additional or set()):\n if option in kwargs:\n options[option] = kwargs.pop(option)\n\n return options\n\n\ndef map_queryset(queryset, processor_func, *args, **kwargs):\n \"\"\"\n Iterates over a queryset with mapreduce calling process_func for\n each Django instance. Calls finalize_func when the iteration completes.\n\n output_writer is optional, but should be a mapreduce OutputWriter\n subclass. Any additional args or kwargs are passed down to the\n handling function.\n\n Returns the pipeline.\n\n Valid additional options are (as kwargs):\n \"finalize_func\",\n \"_shards\",\n \"_output_writer\",\n \"_output_writer_kwargs\",\n \"_job_name\",\n \"_queue_name\",\n \"\"\"\n options = extract_options(kwargs, additional={\"finalize_func\"})\n\n params = {\n 'input_reader': DjangoInputReader.params_from_queryset(queryset),\n 'output_writer': options.pop(\"_output_writer_kwargs\", {}) or {}\n }\n\n finalize_func = options.pop(\"finalize_func\", None)\n _shards = options.pop(\"_shards\", None)\n _output_writer = options.pop(\"_output_writer\", None)\n _output_writer_kwargs = params[\"output_writer\"]\n _job_name = options.pop(\"_job_name\", \"Map task over {}\".format(queryset.model))\n _queue_name = options.pop(\"_queue_name\", None)\n\n return _do_map(\n DjangoInputReader,\n processor_func, finalize_func, params, _shards, _output_writer,\n _output_writer_kwargs,\n _job_name,\n _queue_name,\n *args, **kwargs\n )\n\n\ndef map_files(bucketname, processor_func, *args, **kwargs):\n \"\"\"\n Iterates over files in cloudstorage matching patterns in filenames list.\n\n output_writer is optional, but should be a mapreduce OutputWriter\n subclass. 
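The underscore-prefixed options\n    and finalize_func are stripped out as job settings rather than passed\n    through. 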
Any additional args or kwargs are passed down to the\n handling function.\n\n Returns the pipeline\n\n Valid additional options are (as kwargs):\n \"finalize_func\",\n \"filenames\",\n \"_shards\",\n \"_output_writer\",\n \"_output_writer_kwargs\",\n \"_job_name\",\n \"_queue_name\",\n \"\"\"\n\n options = extract_options(kwargs, additional={\"filenames\", \"finalize_func\"})\n\n filenames = options.pop(\"filenames\", None)\n\n if filenames is None:\n filenames = ['*']\n\n params = {\n 'input_reader': {\n GoogleCloudStorageInputReader.OBJECT_NAMES_PARAM: filenames,\n GoogleCloudStorageInputReader.BUCKET_NAME_PARAM: bucketname,\n },\n 'output_writer': options.pop(\"_output_writer_kwargs\", {}) or {}\n }\n\n finalize_func = options.pop(\"finalize_func\", None)\n _shards = options.pop(\"_shards\", None)\n _output_writer = options.pop(\"_output_writer\", None)\n _output_writer_kwargs = params[\"output_writer\"]\n _job_name = options.pop(\"_job_name\", \"Map task over files {} in {}\".format(filenames, bucketname))\n _queue_name = options.pop(\"_queue_name\", None)\n\n return _do_map(\n GoogleCloudStorageInputReader,\n processor_func, finalize_func, params, _shards, _output_writer,\n _output_writer_kwargs,\n _job_name,\n _queue_name,\n *args, **kwargs\n )\n\n\ndef map_entities(kind_name, namespace, processor_func, *args, **kwargs):\n \"\"\"\n Iterates over all entities of a particular kind, calling processor_func\n on each one.\n Calls finalize_func when the iteration completes.\n\n output_writer is optional, but should be a mapreduce OutputWriter subclass\n _filters is an optional kwarg which will be passed directly to the input reader\n\n Returns the pipeline\n \"\"\"\n options = extract_options(kwargs, additional={\"finalize_func\", \"_filters\"})\n\n params = {\n 'input_reader': {\n RawDatastoreInputReader.ENTITY_KIND_PARAM: kind_name,\n RawDatastoreInputReader.NAMESPACE_PARAM: namespace,\n RawDatastoreInputReader.FILTERS_PARAM: options.pop(\"_filters\", [])\n },\n 'output_writer': options.pop(\"_output_writer_kwargs\", {}) or {}\n }\n\n finalize_func = options.pop(\"finalize_func\", None)\n _shards = options.pop(\"_shards\", None)\n _output_writer = options.pop(\"_output_writer\", None)\n _output_writer_kwargs = params[\"output_writer\"]\n _job_name = options.pop(\"_job_name\", \"Map task over {}\".format(kind_name))\n _queue_name = options.pop(\"_queue_name\", None)\n\n return _do_map(\n RawDatastoreInputReader,\n processor_func, finalize_func, params, _shards, _output_writer,\n _output_writer_kwargs,\n _job_name,\n _queue_name,\n *args, **kwargs\n )\n\n\ndef map_reduce_queryset(queryset, map_func, reduce_func, output_writer, *args, **kwargs):\n\n \"\"\"\n Does a complete map-shuffle-reduce over the queryset\n\n output_writer should be a mapreduce OutputWriter subclass\n\n Returns the pipeline\n \"\"\"\n map_func = qualname(map_func)\n reduce_func = qualname(reduce_func)\n output_writer = qualname(output_writer)\n\n options = extract_options(kwargs)\n\n _shards = options.pop(\"_shards\", None)\n _job_name = options.pop(\"_job_name\", \"Map reduce task over {}\".format(queryset.model))\n _queue_name = options.pop(\"_queue_name\", 'default')\n\n pipeline = MapreducePipeline(\n _job_name,\n map_func,\n reduce_func,\n qualname(DjangoInputReader),\n output_writer,\n mapper_params={\n \"input_reader\": DjangoInputReader.params_from_queryset(queryset),\n },\n reducer_params={\n 'output_writer': options.pop(\"_output_writer_kwargs\", {}) or {}\n },\n shards=_shards\n )\n 
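# Kick off the map-shuffle-reduce on the requested task queue and hand the\n    # running pipeline back so callers can track its progress.\n    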
pipeline.start(queue_name=_queue_name)\n return pipeline\n\n\ndef map_reduce_entities(kind_name, namespace, map_func, reduce_func, output_writer, *args, **kwargs):\n \"\"\"\n Does a complete map-shuffle-reduce over the entities\n\n output_writer should be a mapreduce OutputWriter subclass\n _filters is an optional kwarg which will be passed directly to the input reader\n\n Returns the pipeline\n \"\"\"\n map_func = qualname(map_func)\n reduce_func = qualname(reduce_func)\n output_writer = qualname(output_writer)\n\n options = extract_options(kwargs, additional={\"_filters\"})\n\n _shards = options.pop(\"_shards\", None)\n _job_name = options.pop(\"_job_name\", \"Map reduce task over {}\".format(kind_name))\n _queue_name = options.pop(\"_queue_name\", 'default')\n\n pipeline = MapreducePipeline(\n _job_name,\n map_func,\n reduce_func,\n qualname(RawDatastoreInputReader),\n output_writer,\n mapper_params={\n 'input_reader': {\n RawDatastoreInputReader.ENTITY_KIND_PARAM: kind_name,\n RawDatastoreInputReader.NAMESPACE_PARAM: namespace,\n RawDatastoreInputReader.FILTERS_PARAM: options.pop(\"_filters\", [])\n },\n },\n reducer_params={\n 'output_writer': options.pop(\"_output_writer_kwargs\", {}) or {}\n },\n shards=_shards\n )\n pipeline.start(queue_name=_queue_name)\n return pipeline\n\n\ndef pipeline_has_finished(pipeline_id):\n \"\"\"\n Returns True if the specified pipeline has finished\n \"\"\"\n pipe = get_pipeline_by_id(pipeline_id)\n return pipe.has_finalized\n\n\ndef pipeline_in_progress(pipeline_id):\n \"\"\"\n Returns True if the specified pipeline is in progress\n \"\"\"\n return not pipeline_has_finished(pipeline_id)\n\n\ndef get_pipeline_by_id(pipeline_id):\n return pipeline.Pipeline.from_id(pipeline_id)\n\n\ndef get_mapreduce_state(pipeline):\n mapreduce_id = pipeline.outputs.job_id.value\n mapreduce_state = MapreduceState.get_by_job_id(mapreduce_id)\n return mapreduce_state\n","sub_path":"djangae/contrib/processing/mapreduce/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":11340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"589246297","text":"#\n# node_linter.py\n# Part of SublimeLinter3, a code checking framework for Sublime Text 3\n#\n# Written by Andrew de Andrade \n#\n# Project: https://github.com/SublimeLinter/SublimeLinter3\n# License: MIT\n#\n\n\"\"\"This module exports the NodeLinter subclass of Linter.\"\"\"\n\nimport json\n\nfrom os import path, access, X_OK\nfrom . import linter, persist, util\n\n\nclass NodeLinter(linter.Linter):\n\n \"\"\"\n This Linter subclass provides NodeJS-specific functionality.\n\n Linters installed with npm should inherit from this class.\n By doing so, they automatically get the following features:\n\n - Support for finding local binaries in a project's\n ./node_modules/.bin/ folder.\n\n - comment_re is defined correctly for JavaScript. If your\n linter can be found in the node_modules folder, but lints\n a different language, you should override this with the\n correct regular expression for the comments in the files\n being linted.\n\n \"\"\"\n\n comment_re = r'\\s*/[/*]'\n\n def context_sensitive_executable_path(self, cmd):\n \"\"\"\n Attempt to locate the npm module specified in cmd.\n\n Searches the local node_modules/.bin folder first before\n looking in the global system node_modules folder. 
Returns\n a tuple of (have_path, path).\n\n \"\"\"\n\n local_cmd = None\n global_cmd = util.which(cmd[0])\n\n curr_file = self.view.file_name()\n\n if curr_file:\n cwd = path.dirname(curr_file)\n\n if cwd:\n pkgpath = self.find_pkgpath(cwd)\n\n if pkgpath:\n local_cmd = self.find_local_cmd_path(pkgpath, cmd[0])\n\n if not local_cmd and not global_cmd:\n persist.printf(\n 'WARNING: {} deactivated, cannot locate local or global binary for {}'\n .format(self.name, cmd[0])\n )\n return False, ''\n\n node_cmd_path = local_cmd if local_cmd else global_cmd\n self.executable_path = node_cmd_path\n\n return False, node_cmd_path\n\n def find_pkgpath(self, cwd):\n \"\"\"\n Search parent directories for package.json.\n\n Starting at the current working directory, go up one directory\n at a time, checking whether that directory contains a package.json\n file. If it does, return the path to that file.\n\n \"\"\"\n\n name = 'package.json'\n pkgpath = path.normpath(path.join(cwd, name))\n\n if path.isfile(pkgpath):\n return pkgpath\n\n parent = path.normpath(path.join(cwd, '../'))\n\n # stop when we reach the filesystem root (normpath of the parent\n # of the root is the root itself, on both POSIX and Windows)\n if parent == cwd:\n return None\n\n return self.find_pkgpath(parent)\n\n def find_local_cmd_path(self, pkgpath, cmd):\n \"\"\"\n Find a local binary in node_modules/.bin.\n\n Given the package.json filepath and a local binary to find,\n look in node_modules/.bin for that binary.\n\n \"\"\"\n\n cwd = path.dirname(pkgpath)\n\n binary = self.get_pkg_bin_cmd(pkgpath, cmd)\n\n if binary:\n return path.normpath(path.join(cwd, binary))\n\n node_modules_bin = path.normpath(path.join(cwd, 'node_modules/.bin/'))\n\n binary = path.join(node_modules_bin, cmd)\n\n return binary if binary and access(binary, X_OK) else None\n\n def get_pkg_bin_cmd(self, pkgpath, cmd):\n \"\"\"\n Check if a binary path is defined in the package.json bin property.\n\n Loading a linter to check its own source code is a special case.\n For example, the local eslint binary when linting eslint is\n installed at ./bin/eslint.js and not ./node_modules/.bin/eslint\n\n This function checks the package.json `bin` property keys to\n see if the cmd we're looking for is defined for the current\n project.\n\n \"\"\"\n\n with open(pkgpath) as f:\n pkg = json.load(f)\n\n # guard against packages that define no `bin` property at all\n return pkg['bin'][cmd] if 'bin' in pkg and cmd in pkg['bin'] else None\n","sub_path":"Backup/20141126143519/SublimeLinter/lint/node_linter.py","file_name":"node_linter.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"591818083","text":"import sys\n\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QTableWidget, QTableWidgetItem, QLabel, QGroupBox, QHBoxLayout, QGridLayout\nfrom PyQt5.QtGui import QPixmap, QIcon\nfrom PyQt5.QtCore import Qt\n\nfrom game_logic import Game\n\n\nclass Window(QWidget):\n \n def __init__(self):\n \n super().__init__()\n self.initUi()\n\n self.current_row = 0\n \n self.show()\n \n \n def initUi(self):\n\n\n self.game = Game()\n\n layout = QGridLayout()\n\n table_results = QTableWidget()\n layout.addWidget(table_results, 2, 1)\n \n table_guesses = QTableWidget()\n layout.addWidget(table_guesses, 2, 2)\n\n table_selection = QTableWidget()\n\n self.setup_table_results(table_results)\n self.setup_table_guesses(table_guesses)\n self.setup_table_selection(table_selection, table_guesses, table_results)\n\n game_buttons = self.setup_game_buttons(table_guesses, table_results)\n layout.addWidget(game_buttons, 1, 1, 1, 4) \n \n layout.addWidget(table_selection, 2, 3)\n\n self.setLayout(layout)\n\n\n def setup_game_buttons(self, 
table_guesses, table_results):\n\n group_box = QGroupBox()\n\n hbox = QHBoxLayout()\n \n button_newgame = QPushButton('New game')\n button_checkcombo = QPushButton('Check combination')\n button_backspace = QPushButton('Delete symbol')\n\n button_checkcombo.clicked.connect(\n lambda: self.check_combos(table_guesses, table_results))\n\n hbox.addWidget(button_newgame)\n hbox.addWidget(button_checkcombo)\n hbox.addWidget(button_backspace)\n\n group_box.setLayout(hbox)\n\n return group_box\n \n\n def setup_table_guesses(self, table_guesses):\n\n table_guesses.setFixedSize(258, 514)\n table_guesses.horizontalHeader().hide()\n table_guesses.verticalHeader().hide()\n \n table_guesses.setRowCount(8)\n table_guesses.setColumnCount(4)\n\n for i in range(4):\n table_guesses.setColumnWidth(i, 64)\n\n for i in range(8):\n table_guesses.setRowHeight(i, 64)\n\n\n def setup_table_results(self, table_results):\n\n table_results.setFixedSize(258, 514)\n table_results.horizontalHeader().hide()\n table_results.verticalHeader().hide()\n \n table_results.setRowCount(8)\n table_results.setColumnCount(4)\n\n for i in range(4):\n table_results.setColumnWidth(i, 64)\n\n for i in range(8):\n table_results.setRowHeight(i, 64) \n\n\n def setup_table_selection(self, table_selection, table_guesses, table_results):\n\n table_selection.setFixedSize(66, 514)\n table_selection.horizontalHeader().hide()\n table_selection.verticalHeader().hide()\n \n table_selection.setRowCount(8)\n table_selection.setColumnCount(1)\n\n table_selection.setColumnWidth(0, 64)\n\n for i in range(8):\n table_selection.setRowHeight(i, 64)\n\n table_selection.cellClicked.connect(\n lambda: self.on_table_selection_cell_clicked(\n table_selection, table_guesses, table_results))\n\n\n for i in range(1, 9):\n\n label = QLabel()\n label.setFixedSize(64, 64)\n label.setAlignment(Qt.AlignCenter)\n\n pixmap = QPixmap('icons/{}.png'.format(i))\n pixmap = pixmap.scaledToHeight(34)\n pixmap = pixmap.scaledToWidth(34)\n\n label.setPixmap(pixmap)\n\n label.setProperty('element_id', i)\n\n table_selection.setCellWidget(i - 1, 0, label)\n\n\n def set_attempt_combo(self, label, table_guesses):\n print(self.current_row)\n if not hasattr(Window.set_attempt_combo, 'column'):\n Window.set_attempt_combo.column = 0\n\n if Window.set_attempt_combo.column < 5: \n if Window.set_attempt_combo.column < 4:\n table_guesses.setCellWidget(\n self.current_row,\n Window.set_attempt_combo.column, label)\n Window.set_attempt_combo.column += 1\n\n\n def on_table_selection_cell_clicked(self, sender, table_guesses, table_results):\n\n # the click counter lives on this handler, not on set_attempt_combo\n if not hasattr(Window.on_table_selection_cell_clicked, 'count'):\n Window.on_table_selection_cell_clicked.count = 0\n \n Window.on_table_selection_cell_clicked.count += 1\n \n if Window.on_table_selection_cell_clicked.count == 5:\n self.current_row += 1\n # start a new attempt row: reset the click and column counters\n Window.on_table_selection_cell_clicked.count = 0\n Window.set_attempt_combo.column = 0\n print(self.current_row) \n\n elif Window.on_table_selection_cell_clicked.count < 5:\n label = sender.cellWidget(sender.currentRow(), 0)\n \n label_copy = QLabel()\n label_copy.setFixedSize(64, 64)\n label_copy.setAlignment(Qt.AlignCenter)\n \n label_copy.setPixmap(label.pixmap())\n label_copy.setProperty(\n 'element_id', label.property('element_id'))\n self.set_attempt_combo(label_copy, table_guesses)\n\n\n\n\n def check_combos(self, table_guesses, table_results):\n\n secret = self.game.secret[:]\n attempt = list()\n\n for i in range(4):\n label = table_guesses.cellWidget(\n self.current_row, i)\n attempt.append(label.property('element_id'))\n print(attempt)\n print(secret)\n\n full, partial = 
self.game.compare_combos(attempt, secret)\n self.display_results(full, partial, table_results) \n\n\n def display_results(self, full, partial, table_results):\n\n for i in range(full):\n \n label = QLabel()\n label.setFixedSize(64, 64)\n label.setAlignment(Qt.AlignCenter)\n\n pixmap = QPixmap('icons/bingo.png')\n pixmap = pixmap.scaledToHeight(34)\n pixmap = pixmap.scaledToWidth(34)\n\n label.setPixmap(pixmap)\n\n table_results.setCellWidget(self.current_row, i, label)\n\n for i in range(partial):\n \n label = QLabel()\n label.setFixedSize(64, 64)\n label.setAlignment(Qt.AlignCenter)\n\n pixmap = QPixmap('icons/almost.png')\n pixmap = pixmap.scaledToHeight(34)\n pixmap = pixmap.scaledToWidth(34)\n\n label.setPixmap(pixmap)\n\n table_results.setCellWidget(self.current_row, i, label)\n\n\ndef main(args):\n \n app = QApplication(args)\n window = Window()\n sys.exit(app.exec_())\n \n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"PyQt5/Examples/17_games/mastermind/mastermind1.py","file_name":"mastermind1.py","file_ext":"py","file_size_in_byte":6611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"97576771","text":"import time\nimport glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport statistics\nfrom copy import deepcopy\nimport pathlib #for creating directories\ndef find_index(the_list):\n\tfor i in range(0,len(the_list)):\n\t\tprint(i,the_list[i])\n\ndef process_readme(filename):\n\t#reads the data in readm.txt file and saves them in a list\n\t#this is done by appending experiment parameters to a list\n\t#thereby forming a list of lists\n\t#where each list represents an experiment\n\t#readme = [['par1:val1','par2:val2','par3:val3'...]\n\t#\t\t\t['par1:val1','par2:val2','par3:val3'...]\n\t#\t\t\t['par1:val1','par2:val2','par3:val3'...]\n\t#\t\t\t...]\n\t\n\t##id_map holds the mapping between experiments, start times and names (algorithm name).\n\t#id_map = {id : [name,[time_list]]}\n\t#t2id_map {t1:id,t2:id,...}\n\tglobal readme,id_map,t2id_map\n\twith open(filename) as file:\n\t\tfor data in file:\n\t\t\tdata = data.rstrip('\\n')\n\t\t\td = data.split(',')\n\t\t\tif len(d) > 1:\n\t\t\t\texp_id = d[0]\n\t\t\t\texp_id = exp_id.split(':')\n\t\t\t\tstat = d[-2]\n\t\t\t\tstat = stat.split(':')\n\t\t\t\tstart_time = stat[1]\n\t\t\t\tparas = ['nei_sensing','turn_prob_max']\n\t\t\t\tname = ''\n\t\t\t\tfor x in paras:\n\t\t\t\t\tfor y in d:\n\t\t\t\t\t\tif x in y:\n\t\t\t\t\t\t\tname = name + y.replace(':','=') + ','\n\t\t\t\tname = name.rstrip(',')\n\t\t\t\tt2id_map[start_time] = exp_id[1]#map time to id \n\t\t\t\tif exp_id[1] in id_map:\n\t\t\t\t\t\n\t\t\t\t\tid_map[exp_id[1]][1].append(start_time)\n\t\t\t\telse:\n\t\t\t\t\tid_map[exp_id[1]] = [name,[start_time]]\n\t\t\t\treadme.append(d)\n\t\t\t\ndef get_exp_time(filename,file_category):\n\t#from the file name, this function extracts the time\n\t#an experiment began. 
The time is returned as a string.\n\tst = filename.rfind('/')\n\ten = filename.find(file_category)\n\treturn filename[st+1:en]\n\ndef process_litter_count(filename):\n\t#litter_counts = [['exp1_time',[t_max,max_lit],[time_list],[lit_count_list]],\n\t#\t\t\t\t\t['exp2_time',[t_max,max_lit],[time_list],[lit_count_list]],\n\t#\t\t\t\t\t['exp3_time',[t_max,max_lit],[time_list],[lit_count_list]],\n\t#\t\t\t\t\t['exp4_time',[t_max,max_lit],[time_list],[lit_count_list]],\n\t#\t\t\t\t\t['exp5_time',[t_max,max_lit],[time_list],[lit_count_list]],\n\t#\t\t\t\t\t...]\n\tglobal litter_counts\n\texp_name = get_exp_time(filename,'_litter_count')\n\tt = []\n\tcount = []\n\tmax_value = [-5,-5]\n\texp_data = [exp_name,max_value,t,count]\n\tx = 0\n\twith open(filename) as file:\n\t\tfor data in file:\n\t\t\tif x >= valid_record:\n\t\t\t\tdata = data.rstrip('\\n')\n\t\t\t\tdata_list = data.split(',')\n\t\t\t\tdata_list = [float(i) for i in data_list] #OR list(map(float,data_list))\n\t\t\t\t\n\t\t\t\tif(data_list[1] > max_value[1]):\n\t\t\t\t\tmax_value[0] = data_list[0]\n\t\t\t\t\tmax_value[1] = data_list[1]\n\t\t\t\t\t#print(data_list)\n\t\t\t\tt.append(data_list[0])\n\t\t\t\tcount.append(data_list[1])\n\t\t\tx +=1\n\tlitter_counts.append(exp_data)\n\t\t\t\ndef process_robot_data(filename):\n\t#Record robot data for all experiments\n\t#robot_files = {'exp1_time': [['robot1',robot1_t1,robot1_t2,...],\n\t#\t\t\t\t\t\t\t\t['robot2',robot2_t1,robot2_t2,...],\n\t#\t\t\t\t\t\t\t\t['robot3',robot3_t1,robot3_t2,...],...],\n\t#\t\t\t\t'exp2_time': [['robot1',robot1_t1,robot1_t2,...],\n\t#\t\t\t\t\t\t\t\t['robot2',robot2_t1,robot2_t2,...],\n\t#\t\t\t\t\t\t\t\t['robot3',robot3_t1,robot3_t2,...],...]\n\t#\t\t\t\t...}\n\t#where robotn = name of n-th robot\n\t# robotn_tm = logged data of n-th robot at m-th time step\n\t# robot logged data is currently in format:\n\t# [t, x, y, yaw, turn_prob, seen_lit, nei_seen, comm_sig, lit_carried, 'status']\n\tglobal robot_files\n\texp_name = get_exp_time(filename,'_m_4wrobot')\n\trobot_data = []\n\trobot_name = ''\n\tx = 0\n\twith open(filename) as file:\n\t\tfor data in file:\n\t\t\tif x >= valid_record:\n\t\t\t\tdata = data.rstrip('\\n')\n\t\t\t\tdata = data.split(':')\n\t\t\t\trobot_name = data[0]\n\t\t\t\trobot_status = data[2]\n\t\t\t\td = data[1].split(',')\n\t\t\t\t#print(d[:-1])\n\t\t\t\t#data_to_float = [float(i) for i in d[:-1]]\n\t\t\t\t#data_to_float.append(d[-1])\n\t\t\t\tdata_to_float = [float(i) for i in d]\n\t\t\t\tdata_to_float.append(robot_status)\n\t\t\t\trobot_data.append(data_to_float)\n\t\t\tx +=1\n\trobot_data.insert(0,robot_name)\n\tif exp_name in robot_files:\n\t\trobot_files[exp_name].append(robot_data)\n\telse:\n\t\trobot_files[exp_name] = []\n\t\trobot_files[exp_name].append(robot_data)\n\t\t\n\t\t\t\ndef get_experiments_filenames(directory):\n\tglobal readme,id_map,t2id_map,litter_counts,robot_files\n\t\n\tfiles = []\n\tfor name in glob.glob(directory + '*'):\n\t\tfiles.append(name)\n\t\t#print(name)\n\t\t\n\tfiles.sort()\n\tloaded = False\n\ts = str(files)\n\tif ('readme.npy' in s and\n\t\t\t'id_map.npy' in s and\n\t\t\t't2id_map.npy' in s and\n\t\t\t'litter_counts.npy' in s and\n\t\t\t'robot_files.npy' in s):\n\t\treadme = np.load(directory + 'readme.npy').tolist()\n\t\tid_map = np.load(directory + 'id_map.npy').item()\n\t\tt2id_map = np.load(directory + 't2id_map.npy').item()\n\t\tlitter_counts = np.load(directory + 'litter_counts.npy').tolist()\n\t\trobot_files = np.load(directory + 'robot_files.npy').item()\n\t\tloaded = 
True\n\t\tprint('fi')\n\telse:\n\t\n\t\tfor name in files:\n\t\t\tif \"readme.md\" in name:\n\t\t\t\tprocess_readme(name)\n\t\t\telif \"_litter_count\" in name:\n\t\t\t\tprocess_litter_count(name)\n\t\t\telif \"_m_4wrobot\" in name:\n\t\t\t\t#print(name)\n\t\t\t\tprocess_robot_data(name)\n\t\tloaded = True\n\t\tnp.save(directory + 'readme.npy',readme)\n\t\tnp.save(directory + 'id_map.npy',id_map)\n\t\tnp.save(directory + 't2id_map.npy',t2id_map)\n\t\tnp.save(directory + 'litter_counts.npy',np.array(litter_counts,dtype=object))\n\t\tnp.save(directory + 'robot_files.npy',robot_files)\n\t\tprint('else')\n\t\t#print(robot_files)\n\t\t\t\n\treturn files\n\t#print(name)\ndef plot_parameter_trend(data,p,p_name):\n\tsf = plt.figure()\n\tax = sf.add_subplot(1,1,1)\n\trobot_name = data[0]\n\tx_values = [i[0] for i in data[1:]]\n\ty_values = [i[p] for i in data[1:]]\n\t#print(x_values)\n\t#print(y_values)\n\tax.set_title(robot_name + ': ' + p_name)\n\t#ax.axis('square')\n\tax.plot(x_values,y_values,label=str(p))\n\tax.legend(loc=2,bbox_to_anchor=(1,1))\n\t\ndef plot_x_parameter_trend(alg_t,data,p,p_names):\n\t#plt.figure()\n\tff,axes = plt.subplots(len(p),1)\n\t\n\trobot_name = data[0]\n\t\n\tx = [i[0] for i in data[1:]]\n\t\n\tn = 0\n\talg_id = t2id_map[alg_t]\n\taxes[n].set_title(alg_id+\":\"+robot_name+\",\"+alg_t)\n\tfor par in p:\n\t\ty = [i[par] for i in data[1:]]\n\t\t\n\t\taxes[n].plot(x,y)\n\t\taxes[n].set_ylabel(p_names[n])\n\t\tn = n + 1\n\tff.savefig(plotsNdata+robot_name+'turnProb_repulsionSensed_attractionSensed'+fName+'.pdf')\n\t\n\t\n\t\ndef robots_trajectory():\n\tparam_name = ['time', 'xloc', 'yloc','orientation','turn_prob',\n\t\t\t\t'seen_litter', 'seen_rep_neighbours', 'repulsion_sensed',\n\t\t\t\t'seen_att_neighbours','attraction_sensed',\n\t\t\t\t'litter_carried','linear_distance','angular_distance']\n\tall_experiments = list(robot_files.keys())\n\tall_experiments.sort()\n\t#print(all_experiments)\n\t#expmt_choice = ''\n\talg_id = '10'\n\t#print(t2id_map)\n\t#for xxx in all_experiments:\n\t\t#print(t2id_map[xxx])\n\t#\tif t2id_map[xxx] == alg_id:\n\t#\t\texpmt_choice = xxx\n\t#\t\tbreak\n\texpmt_choice = [all_experiments[0],all_experiments[30],all_experiments[60]]\n\t\n\t#plot_x_parameter_trend(expmt_choice,robot_files[expmt_choice][7],[4,7,9],\n\t\t#\t\t\t\t\t[param_name[4],param_name[7],param_name[9]])\n\t\t\n\tn_plots = len(all_experiments)\n\tprint(n_plots)\n\t#f_rob = plt.figure()\n\t#ax_rob = f_rob.add_subplot(1,2,1)\t\n\tindex = 1\n\tf_rob_list = []\n\tfor expmt in expmt_choice:#[all_experiments[0]]:\n\t\t#f_rob,ax_rob = plt.subplots(2,1)\n\t\t#f_rob,ax_rob = plt.subplots()\n\t\tf_robNax = plt.subplots()\n\t\tf_rob,ax_rob = f_robNax\n\t\t#f_rob_list.append(f_robNax)\n\t\t#ax_rob = f_rob.add_subplot(1,2,1)\t\n\t\t#ax = f.add_subplot(n_plots/2,2,index)\n\t\txx = t2id_map[expmt]\n\t\texpname = 'ID'+xx+','+expmt\n\t\t#expname = 'ID:10,'+id_map['10'][0]\n\t\t\n\t\tax_rob.set_title(expname)#[0]\n\t\tindex = index + 1\n\t\tx = []\n\t\ty = []\n\t\t#for p in range(len(param_name)):\n\t\t\t#plot_parameter_trend(robot_files[expmt][1],p,param_name[p])\n\t\tfor robot in robot_files[expmt]:\n\t\t\t#plot_x_parameter_trend(expmt,robot,[4,7,9],\n\t\t\t#\t\t\t\t[param_name[4],param_name[7],param_name[9]])\n\t\t#robot = robot_files['2017_12_19_13_56_14'][1]\n\t\t\tx_values = [i[1] for i in robot[1:]]\n\t\t\ty_values = [i[2] for i in 
robot[1:]]\n\t\t\tx.extend(x_values)\n\t\t\ty.extend(y_values)\n\t\t#print(len(robot[2]))\n\t\t\t#ax.set_xlim(xmin=-10,xmax=10)\n\t\t\t#ax.set_ylim([-10,10])\n\t\t#ax.legend(loc=2,bbox_to_anchor=(1,1))\n\t\n\t\t#x = [int(i) for i in x[::1]]\n\t\t#for i in range(10): x.extend(x) \n\t\t#y = [int(i) for i in y[::1]]\n\t\t#for i in range(10): y.extend(y) \n\t\tnb = np.linspace(-25,25,4)\n\t\tax_rob.set_xlim(-25,25)#[0]\n\t\tax_rob.set_ylim(-25,25)#[0]\n\t\t#ax_rob[0].axis('square')\n\t\t\n\t\t#plt.axis([-25,25,-25,25])\n\t\t#weights = [100] * len(y)\n\t\t#ax_rob.axis('equal')\n\t\tnbins = (nb,nb)\n\t\tH, xedges, yedges = np.histogram2d(x,y,bins=nbins)#,weights=weights)\n\t\tH = np.rot90(H)\n\t\tH = np.flipud(H)\n\t\t#print(H.shape)\n\t\tHmasked = np.ma.masked_where(H==0,H)\n\t\t#ax_rob.plot(xedges,yedges,Hmasked)#,label=robot[0])\n\t\t#ax.axis('equal')\n\t\tim = mpl.image.NonUniformImage(ax_rob,interpolation='bilinear')#[0]\n\t\txcenters = (xedges[:-1] + xedges[1:]) / 2\n\t\tycenters = (yedges[:-1] + yedges[1:]) / 2\n\t\tim.set_data(xcenters,ycenters,H)\n\t\tpcm = ax_rob.imshow(H,interpolation='bilinear')#[0],vmin = 1000, vmax = 6500\n\t\tax_rob.images.append(im)#[0]\n\t\t#ax_rob[0].scatter([0],[0],marker='.',color='b')\n\t\t\n\t\t#cbar.set_ylabel('Counts')\n\t\t#X,Y = np.meshgrid(xedges,yedges)\n\t\t#pcm = ax_rob[0].imshow(H,interpolation='bilinear')\n\t\t\n\t\t#pcm = ax_rob[0].pcolormesh(xedges,yedges,Hmasked)\n\t\t#pcm = ax_rob[0].hist2d(x,y,bins=nbins)\n\t\t\n\t\tdivider = make_axes_locatable(ax_rob)#[0]\n\t\tcax = divider.append_axes(\"right\",size=\"5%\",pad=0.25)\n\t\t#print(cax.get_xlim(),cax.get_ylim())\n\t\tf_rob.colorbar(pcm,cax=cax)\n\t\t\n\t\t#ax_rob2 = f_rob.add_subplot(1,2,2)\n\t\t#ax_rob[1].set_xlim(-25,25)\n\t\t#ax_rob[1].set_ylim(-25,25)\n\t\t#ax_rob[1].axis('square')\n\t\t#ax_rob[1].axis([-25,25,-25,25])\n\t\t\n\t\t#ax_rob[1].scatter(x,y,marker='.',s=1,linewidths=1)\n\t\t\n\t\tf_rob.tight_layout()\n\t\tf_rob.savefig(plotsNdata+'robot_trajectory_'+expname+fName+'.pdf')#, bbox_inches='tight')\n\t#plt.show()\n\t\n\t\t\n\t#f.tight_layout(pad=0.0,h_pad=1,w_pad=1)\n\t\n\t#f.savefig(folder_name+'robots_pose_data.pdf')\ndef litter_pcts():\t\t\t\n\tglobal litter_counts\n\t#pct_tlist = []\n\t#pct_litlist = []\n\t#litt_pcts.reverse()\n\t\n\tfor t in range(len(litter_counts)):\n\t\tl = litter_counts[t]\n\t\ttime = l[1][0]\n\t\tlitCount = l[1][1]\n\t\t\n\t\tfor t_loc in range(len(l[3])):\n\t\t\tif l[3][t_loc] >=lit_pcts:\n\t\t\t\t \n\t\t\t\t#t_loc = next(x[0] for x in enumerate(l[3]) if x[1] >= pl)\n\t\t\t\ttime = l[2][t_loc]\n\t\t\t\tlitCount = l[3][t_loc]\n\t\t\t\tbreak;\n\t\tl[1][0] = time\n\t\tl[1][1] = litCount\n\t\t\t\n\t\t\t#pct_tlist.append(l[2][t_loc])\n\t\t\t#pct_litlist.append(l[3][t_loc])\n\t\t#pct_tlist.append(l[1][0])\n\t\t#pct_litlist.append(l[1][1])\n\t\t#l.append(pct_tlist)\n\t\t#l.append(pct_litlist)\n\t\t\ndef litter_collected():\n\t#litter_bar_plot = [i[1][1] for i in litter_counts]\n\t#litter_bar_plot_exps = [i[0] for i in litter_counts]\n\tmpl.style.use('default')# to see available styles use print(plt.style.available)\n\t\n\tlitter_pcts()#append a list of time and litter count percents for each experiment\n\tC = []\n\tc = 0\n\tfigx = []\n\tfor p in range(len(id_map)):\n\t\tx = plt.subplots()\n\t\tfigx.append(x)\n\t\n\t#litter_bar_plot_lit = [[]] * (len(lit_pcts)+1)\n\t#litter_bar_plot_t = [[]] * (len(lit_pcts)+1)\n\tlitter_bar_plot = []\n\tlitter_bar_plot_t = []\n\t\n\tlitter_bar_plot_exps = []\n\tid_list = [] # keeps track of all the experiment id's that has been 
encountered\n\tid_num = []\n\tfor t in litter_counts:\n\t\texp_id = t2id_map[t[0]]\n\t\tif exp_id in id_list:\n\t\t\tx = id_list.index(exp_id)\n\t\t\tlitter_bar_plot[x].append(t[1][1])\n\t\t\tlitter_bar_plot_t[x].append(t[1][0])\n\t\t\t#litter_bar_plot_t[x].append(t[4])\n\t\t\t#litter_bar_plot_lit[x].append(t[5])\n\t\t\tfigx[x][1].plot(t[2],t[3],'b', label=exp_id)\n\t\t\t#figx[x][1].set_title(exp_id)\n\t\t\t\n\t\t\t#axx.plot(t[2],t[3], C[x], label=exp_id)\n\t\telse:\n\t\t\tlitter_bar_plot.append([t[1][1]])\n\t\t\tlitter_bar_plot_t.append([t[1][0]])\n\t\t\t#ts = t[4]\n\t\t\t#ls = t[5]\n\t\t\t\n\t\t\t#litter_bar_plot_t.append(t[4])\n\t\t\t#litter_bar_plot_lit.append(t[5])\n\t\t\t\n\t\t\tid_list.append(exp_id)\n\t\t\tlitter_bar_plot_exps.append('ID:'+exp_id+','+id_map[exp_id][0])\n\t\t\tid_num.append(exp_id)\n\t\t\t\n\t\t\tx = id_list.index(exp_id)\n\t\t\t\n\t\t\tid_c = C_all[c]\n\t\t\tc=c+1\n\t\t\tC.append(id_c)\n\t\t\tfigx[x][1].plot(t[2],t[3],'b', label=exp_id)\n\t\t\tfigx[x][1].set_title(exp_id)\n\t\t\tfigx[x][1].set_xlabel('time in seconds')\n\t\t\tfigx[x][1].set_ylabel('quantity of litter')\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t#axx.plot(t[2],t[3], id_c, label=exp_id)\n\t#llg=axx.legend()\n\t#figx.savefig(folder_name+'litter_collected_TimeStep.pdf', bbox_extra_artists=(llg,), bbox_inches='tight')\n\tfor i in range(len(id_list)):\n\t\tttl = id_list[i]\n\t\t#l = figx[i][1].legend()\n\t\t\n\t\tfigx[i][0].savefig(plotsNdata+ttl+fName+'.pdf', bbox_inches='tight')\n\t\t\t\n\t\t#litter_bar_plot.append(t[1][1])\n\t\t#litter_bar_plot_exps.append('ID:'+exp_id+','+id_map[exp_id][0])\n\t\n\tmeans = []\n\tstd_dev = []\n\t\n\twith open(plotsNdata + 'litter_counts'+fName+'.txt','w') as f:\n\t\tfor v,id_n in zip(litter_bar_plot, id_num):\n\t\t\tstd_dev.append(statistics.pstdev(v))\n\t\t\tmeans.append(statistics.mean(v))\n\t\t\tf.write(id_n + ',' + ','.join([str(i) for i in v]))\n\t\t\tf.write('\\n')\n\t\t\n\tnum_of_bars = len(litter_bar_plot)\n\t#means = litter_bar_plot\n\t#std_dev = [0]*num_of_bars\n\t\n\tindex = np.arange(num_of_bars)\n\tbar_width = 0.35\n\topacity = 0.4\n\terror_config = {'ecolor':'0.3'}\n\t\n\t#print(litter_bar_plot_exps)\n\t#print(litter_bar_plot)\n\t#litter_bar_plot_t = [i[1][0] for i in litter_counts]\n\t#print(litter_bar_plot_t)\n\tfig,ax = plt.subplots()\n\tax.bar(index,\n\t\t\tmeans,\n\t\t\t#bar_width,\n\t\t\talpha=opacity,\n\t\t\tcolor='b',\n\t\t\tyerr=std_dev,\n\t\t\terror_kw=error_config,\n\t\t\tlabel=' ')\n\tplt.title('Total Litter Deposited')#\n\t#plt.xlabel('Experiments')\n\tplt.ylabel('Quantity of Litter')#\n\t#plt.xticks(index,id_num)\n\tplt.tick_params(\n\t\t\t\t\taxis='x', # changes apply to the x-axis\n\t\t\t\t\twhich='both', # both major and minor ticks are affected\n\t\t\t\t\tbottom='off', # ticks along the bottom edge are off\n\t\t\t\t\ttop='off', # ticks along the top edge are off\n\t\t\t\t\tlabelbottom='off') # labels along the bottom edge are off\n\tm = ['%1.2f' % x for x in means]\n\ts = ['%1.2f' % x for x in std_dev]\n\ttable = plt.table(cellText=[m,s],\n\t\t\t\trowLabels=['mean','std_dev'],\n\t\t\t\tcolLabels=id_num,\n\t\t\t\tloc='bottom')\n\t#plt.xticks(index,litter_bar_plot_exps,rotation=90)\n\t#table.set_fontsize(12)\n\t#table.scale(5,2)\n\tlg = plt.legend()\n\t#plt.tight_layout()\n\tfig.savefig(plotsNdata+'litter_collected'+fName+'.pdf', bbox_extra_artists=(lg,table,), bbox_inches='tight')\n\t\n\tmeans_t = []\t\n\tstd_dev_t = []\n\t\t\n\twith open(plotsNdata + 'time_taken'+fName+'.txt','w') as f:\n\t\tfor v,id_n in zip(litter_bar_plot_t, 
id_num):\n\t\t\tstd_dev_t.append(statistics.pstdev(v))\n\t\t\tmeans_t.append(statistics.mean(v))\n\t\t\tf.write(id_n + ',' + ','.join([str(i) for i in v]))\n\t\t\tf.write('\\n')\n\t\n\tfigt,axt = plt.subplots()\n\taxt.bar(index,\n\t\t\tmeans_t,\n\t\t\t#bar_width,\n\t\t\talpha=opacity,\n\t\t\tcolor='b',\n\t\t\tyerr=std_dev_t,\n\t\t\terror_kw=error_config,\n\t\t\tlabel=' ')\n\tplt.title('Simulation time taken')#Total Litter Deposited after 200 seconds\n\t#plt.xlabel('Experiments')\n\tplt.ylabel('Time taken')#Quantity of Litter\n\t#plt.xticks(index,id_num)\n\tplt.tick_params(\n\t\t\t\t\taxis='x', # changes apply to the x-axis\n\t\t\t\t\twhich='both', # both major and minor ticks are affected\n\t\t\t\t\tbottom='off', # ticks along the bottom edge are off\n\t\t\t\t\ttop='off', # ticks along the top edge are off\n\t\t\t\t\tlabelbottom='off') # labels along the bottom edge are off\n\tm = ['%1.2f' % x for x in means_t]\n\ts = ['%1.2f' % x for x in std_dev_t]\n\ttable = plt.table(cellText=[m,s],\n\t\t\t\trowLabels=['mean','std_dev'],\n\t\t\t\tcolLabels=id_num,\n\t\t\t\tloc='bottom')\n\t#plt.xticks(index,litter_bar_plot_exps,rotation=90)\n\t#table.set_fontsize(12)\n\t#table.scale(5,2)\n\tlg = plt.legend()\n\t#plt.tight_layout()\n\tfigt.savefig(plotsNdata+'litter_collected_t'+fName+'.pdf', bbox_extra_artists=(lg,table,), bbox_inches='tight')\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\tfig2,ax2 = plt.subplots()\n\tind = 0\n\txx = range(len(means))\n\tfor b in xx:\n\t\tind = ind + bar_width/2.0\n\t\tax2.bar(ind,\n\t\t\tmeans[b],\n\t\t\tbar_width/2.0,\n\t\t\talpha=opacity,\n\t\t\tyerr=std_dev[b],\n\t\t\terror_kw=error_config,\n\t\t\tlabel=litter_bar_plot_exps[b])\n\t\n\tplt.tick_params(\n axis='x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelbottom='off') # labels along the bottom edge are off\n \n\tlgd = plt.legend(loc=2,bbox_to_anchor=(1,1))\n\tfig2.savefig(plotsNdata+'litter_collected2'+fName+'.pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')\n\t\n\t\ndef max_litter_time():\n\t#This will plot the time it takes to forage the max litter collected\n\t#for an algorithm\n\treturn\t\n\t\ndef litter_collected_10():\n\t#This will plot bar charts of quantity of litter collected in set of\n\t#10 time steps.\n\treturn\ndef litter_vs_time():\n\t#np_litter_counts = np.array(litter_counts)\n\t#print('\\n'.join([i[0] for i in litter_counts]) )\n\n\t#plt.plot(litter_counts[0][1],litter_counts[0][2])\n\t#plt.show()\n\t#f = plt.figure()\n\n\t#ax = f.add_subplot(5,2,1)\n\t#ax.plot(litter_counts[0][1],litter_counts[0][2])\n\n\n\t#ax = f.add_subplot(5,2,4)\n\t#ax.plot(litter_counts[1][1],litter_counts[1][2])\n\t#f.tight_layout(pad=0.0)\n\t#f.savefig('test.pdf')\n\treturn\n\t\ndef main():\n\tglobal tot_lit, lit_pcts\n\t#my_files = get_experiments_filenames('results/2017_12_19/*')\n\tmy_files = get_experiments_filenames(folder_name)\n\treadMe = readme[0]\n\tfor p in readMe:\n\t\tif 'no_of_lit' in p:\n\t\t\tp = p.split(':')\n\t\t\ttot_lit = float(p[1])\n\t\t\tlit_pcts = pcts * tot_lit\n\t\t\tbreak\n\t#print([i[0] for i in robot_files['2017_12_19_13_56_14']])\n\n\t#find_index(readme[0])\n\t#np_readme = np.array(readme)\n\t#exps = np_readme[:,21]\n\t#print(exps)\n\t#PLOT TRAJECTORY OF ALL ROBOTS FOR ALL EXPERIMENTS CONDUCTED.\n\trobots_trajectory()\n\n\t#DO BAR CHART\n\tlitter_collected()\n\t\n\n\t#PLOTTING THE TREND OF LITTER OVER 
TIME\n\tlitter_vs_time()\n\t#print(id_map)\n\t#plt.show()\n\t\nC_all = ['#e6194b','#3cb44b','#ffe119','#0082c8','#f58231','#911eb4',\n\t\t '#46f0f0','#f032e6','#d2f53c','#fabebe','#008080','#e6beff',\n\t\t '#aa6e28','#fffac8','#800000','#aaffc3','#808000','#ffd8b1',\n\t\t '#000080','#808080','#000000']\nmy_files = []\nrobot_files = {}\nlitter_counts = []\nreadme = []\t\nid_map = {}\nt2id_map = {}\nvalid_record = 4\ntot_lit = 0# 100 #total litter in world\npcts = 0.9 #percentages expected\nlit_pcts = 0# 1 * tot_lit# [tot_lit * p for p in pcts]\nfName = '2018-05-18'\nfolder_name = 'results/'+ fName + '/'#'results/2017_12_21-exp1/'\nplotsNdata = folder_name + 'plotsNdata/'\n#print(len(my_files))\n\nif __name__ == '__main__':\n\tt0 = time.time()\n\tpathlib.Path(plotsNdata).mkdir(parents=False,exist_ok=True)\n\tmain()\n\tprint(time.time() - t0)\n\t\n\t\n","sub_path":"result_plotter.py","file_name":"result_plotter.py","file_ext":"py","file_size_in_byte":18445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"364839307","text":"\r\n# coding=utf-8\r\n\r\n\"\"\" \r\nCreated on 2017-07-13 \r\n@author: SueWang\r\n功能: 使用requests库爬取妈妈网一周内的搜索结果\r\n网址:http://so.mama.cn/\r\n\"\"\"\r\nimport os\r\nimport re\r\nimport time\r\nimport math\r\nimport queue\r\nimport random\r\nimport datetime\r\nimport requests\r\nimport threading\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\n\r\n# 线程锁 防止多线程同时读写数据\r\nmutex = threading.RLock()\r\nmutex2 = threading.Lock()\r\nq = queue.Queue() #创建任务队列\r\nWORKER_NUM = 4\r\nrandom.seed()\r\n\r\n#继承Thread的Producer类 负责将从网站抓取到的链接放入队列\r\nclass urlProducer(threading.Thread):\r\n \r\n def __init__(self,keyword, url):\r\n super(urlProducer,self).__init__()\r\n self.keyword = keyword\r\n self.url = url\r\n \r\n \r\n def run(self):\r\n global q\r\n if not (isinstance(self.keyword, str) and isinstance(self.url,str)):\r\n raise TypeError('parameter is not str')\r\n\r\n l = getThreadUrl(self.keyword,self.url)\r\n if mutex2.acquire(1):\r\n a = list(map(q.put, l))\r\n mutex2.release()\r\n \r\n\r\n# 继承Thread的Consumer类 负责从队列中取出链接 爬取文章的信息\r\nclass urlConsumer(threading.Thread):\r\n \r\n def __init__(self):\r\n super(urlConsumer,self).__init__()\r\n \r\n def run(self):\r\n global q\r\n if mutex.acquire(1):\r\n if mutex2.acquire(1):\r\n if not q.empty():\r\n url = q.get()\r\n getPageContent(url)\r\n mutex2.release()\r\n mutex.release()\r\n\r\ndef getPageUrl(keyword):\r\n \r\n TIMER = random.randint(2,5)\r\n time.sleep(TIMER)\r\n \r\n search_url = 'http://so.mama.cn/search'\r\n \r\n# 参数\r\n# q: 搜索关键词\r\n# source: 搜索结果类别, 'mamaquan'是帖子\r\n# csite: 不知道是什么\r\n# size: 每页显示结果数量\r\n# sortMode: 也不知道是什么 改成2 3 4 好像也没变化\r\n# dateline: 搜索结果时间\r\n# all - 全部时间\r\n# year - 一年内\r\n# month - 一个月内\r\n# week - 一周内\r\n# day - 一天内\r\n \r\n param = {'q':keyword,'source':'mamaquan','csite':'all','size':'50',\r\n 'sortMode':'1', 'dateline':'week'}\r\n header = {'User-Agent':'\"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:52.0)'+\r\n 'Gecko/20100101 Firefox/52.0\"'}\r\n\r\n html = requests.get(search_url, params = param, headers = header) \r\n bs = BeautifulSoup(html.content,'lxml')\r\n total_result = bs.find(class_ = 'search-body__text').span.get_text()\r\n total_result = int(total_result[:len(total_result)-1])\r\n htmls = []\r\n \r\n if not total_result == 0:\r\n pageNum = math.ceil(total_result/50)\r\n \r\n pageLink = search_url + '?'\r\n \r\n for (key,value) in param.items():\r\n pageLink += (key + '=' + value + '&')\r\n \r\n htmls = [pageLink + 'page=' + 
str(i + 1) for i in range(pageNum)]\r\n \r\n return htmls\r\n \r\ndef getThreadUrl(keyword, url):\r\n \r\n TIMER = random.randint(5,10)\r\n time.sleep(TIMER)\r\n \r\n html = requests.get(url)\r\n bs = BeautifulSoup(html.content,'lxml')\r\n list = []\r\n \r\n if bs.find(class_ = 'search-body__text').span.text == '0个':\r\n print('no search result')\r\n return\r\n\r\n else:\r\n try:\r\n threads = bs.findAll(class_ = 'result-com__title')\r\n \r\n for thread in threads:\r\n if keyword in thread.text:\r\n list.append(thread.a['href'])\r\n except:\r\n raise\r\n \r\n return list\r\n \r\n#在页面有内容的前提下,获取内容\r\ndef getPageContent(url):\r\n \r\n global data\r\n html = requests.get(url)\r\n soup = BeautifulSoup(html.content,'lxml')\r\n \r\n url_time = \"\"\r\n url_author = \"\"\r\n url_title = \"\"\r\n url_content = \"\"\r\n \r\n try: \r\n url_time = soup.find(class_='re_from').span.text\r\n except:\r\n pass\r\n try:\r\n url_author = soup.find(class_='user_name')['title']\r\n except:\r\n pass\r\n try:\r\n url_title = soup.find(class_='h1').text.strip()\r\n except:\r\n pass\r\n try:\r\n url_content = soup.find(class_='re_content').text.strip()\r\n url_content = re.sub('[(\\r)|(\\n)]+',\"\",url_content)\r\n except:\r\n pass\r\n \r\n result = {'time':url_time,'author':url_author,'title':url_title,'content':url_content}\r\n data.append(result)\r\n\r\n#获取当前用时\r\ndef getUsedTime(start_time):\r\n \r\n if not isinstance(start_time,datetime.datetime):\r\n raise TypeError('start time is not a datetime.datetime')\r\n \r\n \r\n use_time = (datetime.datetime.now() - start_time).total_seconds()\r\n m, s = divmod(use_time, 60)\r\n h, m = divmod(m, 60)\r\n return \"%02d:%02d:%02d\" % (h, m, s)\r\n\r\n#*******************************************************************************\r\n# 程序入口\r\n#*******************************************************************************\r\nif __name__ == '__main__':\r\n\r\n folder_path = 'C:/Users/suewang/desktop/python/TM/milk_powder'\r\n df_keys = pd.read_excel(folder_path+'/search_keywords.xlsx', sheetname = 0,header = 0)\r\n if not os.path.isdir(folder_path + '/crawl_result/mama.cn'):\r\n os.makedirs(folder_path + '/crawl_result/mama.cn')\r\n \r\n for row in df_keys.iterrows():\r\n data = []\r\n try:\r\n keyword = row[1][0]\r\n print('开始抓取关键词:' + keyword + ' ...')\r\n start_time = datetime.datetime.now()\r\n today = start_time.strftime('%Y-%m-%d') \r\n pageUrl = getPageUrl(keyword)\r\n \r\n producer_thread = []\r\n if pageUrl:\r\n for page in pageUrl:\r\n t = urlProducer(keyword, page)\r\n producer_thread.append(t)\r\n t.start()\r\n for t in producer_thread:\r\n t.join()\r\n \r\n print('帖子链接抓取完成,用时:'+ getUsedTime(start_time))\r\n threads = []\r\n while not q.empty():\r\n for i in range(WORKER_NUM):\r\n thread = urlConsumer()\r\n threads.append(thread)\r\n thread.start()\r\n for thread in threads:\r\n thread.join()\r\n else:\r\n print('无搜索结果')\r\n result = pd.DataFrame(data, columns = ['time','title','author','content'])\r\n file_path = folder_path + \"/crawl_result/mama.cn/\" + keyword + '_' + today + '.xlsx'\r\n result.to_excel(file_path,encoding = 'utf-8',index = False)\r\n print( \"关键词 \" + keyword + ' 帖子抓取完成' \r\n + '\\n总用时:'+ getUsedTime(start_time) + '\\n')\r\n \r\n time.sleep(2)\r\n except Exception as e:\r\n raise\r\n print('抓取结束') ","sub_path":"mama.cn_crawler.py","file_name":"mama.cn_crawler.py","file_ext":"py","file_size_in_byte":7076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"520262908","text":"# 
coding=utf8\nimport requests\nimport json\nimport time\nimport redis\nimport os\nimport threading\nimport hmac\nfrom hashlib import sha1, md5\nfrom app.models.script_info import ScriptInfoModel\nfrom config import Config, UPLOAD_BASE_PATH\n\npool = redis.ConnectionPool(host=Config['redis']['host'], port=Config['redis']['port'], decode_responses=True)\nscript_model = ScriptInfoModel()\n\n\ndef hash_md5(message):\n    m = md5()\n    m.update(message)\n    return m.hexdigest()\n\n# Used to download OA scripts; provides the file description info ({\n# \"script_id\": 0x01,\n# \"script_name\": script_name,// script name\n# \"duration\": duration,// script duration\n# \"instruction_number\": ins_num,// total number of instructions\n# \"md5\": md5_val,// script MD5\n# \"md5_version\": md5_version // number of script revisions\n# })\nclass DownloadScriptToSync:\n    ServerBaseUrl = \"http://api.oa.damosphere.com\"\n    RequestScriptDetailPath = \"/app/v1/schedule/%d\"\n    storage = \"storage/scripts/\"\n    # redis instance\n    rds = None\n\n    CurrentFile = \"\"\n\n    def __init__(self):\n        self.rds = redis.Redis(connection_pool=pool)\n        self.storage = os.path.join(Config[UPLOAD_BASE_PATH], self.storage)\n        # if not os.path.exists(self.storage):\n        #     os.makedirs(self.storage)\n\n    @staticmethod\n    def assemble_script_info_cache_key(script_id):\n        return \"Scent-Script-Info-%d\" % script_id\n\n    @staticmethod\n    def assemble_downloading_cache_key(script_id):\n        return \"Scent-Script-Downloading-%d\" % script_id\n\n    @staticmethod\n    def float2millisecond_bytes(fv, bytes_num=4):\n        return DownloadScriptToSync.int2bytes(int(round(float(fv) * 1000)), bytes_num)\n\n    @staticmethod\n    def int2bytes(integer, bytes_num=4):\n        arr = []\n        for i in range(bytes_num):\n            arr.append((integer >> (8 * (bytes_num - i - 1))) & 0xff)\n        return list(bytes(arr))\n\n    @staticmethod\n    def byte2number(byte_array, start=0, number_byte_count=4):\n        val = 0\n        for i in range(start, start + number_byte_count):\n            val = (val << 8) + byte_array[i]\n        return val\n\n    @staticmethod\n    def parse_small_id(smell_sn=\"\"):\n        smell_sn = smell_sn.upper()\n        while ord(smell_sn[0]) >= 65 and ord(smell_sn[0]) <= 90:\n            smell_sn = smell_sn[1:]\n        l = len(smell_sn) - 1\n        while ord(smell_sn[l]) >= 65 and ord(smell_sn[l]) <= 90:\n            smell_sn = smell_sn[0:-1]\n            l = len(smell_sn) - 1\n        return int(smell_sn)\n\n    def get_file_name_by_md5(self, md5_val):\n        return self.storage + md5_val.lower()\n\n    # Turn the script into a file for download\n    def download_script_from_server(self, script_id):\n        url = self.ServerBaseUrl + self.RequestScriptDetailPath % script_id\n        print(\"Request \" + url)\n        res = requests.get(url)\n        json_data = json.loads(res.text)\n\n        if not ((json_data['code'] == 1 or json_data['code'] == 200) and json_data['data']):\n            raise Exception(json_data['msg'])\n\n        schedule_list = json_data['data']['schedule']\n        schedule_md5 = json_data['data']['MD5']\n        script_name = json_data['data']['scheduleName']\n        md5_version = json_data['data']['md5_version']\n        print(\"md5_version = \", md5_version)\n        ins_num = len(schedule_list)\n        duration = float(schedule_list[ins_num - 1]['many']) + float(schedule_list[ins_num - 1]['keep'])\n        duration = int(round(duration * 1000))\n\n        byte_array = [0, 1]\n        byte_array += DownloadScriptToSync.int2bytes(duration, 4)\n        byte_array += DownloadScriptToSync.int2bytes(ins_num, 2)\n        for i in range(ins_num):\n            byte_array += DownloadScriptToSync.int2bytes(i + 1, 2)\n            byte_array += DownloadScriptToSync.int2bytes(self.parse_small_id(schedule_list[i][\"sn\"]), 4)\n            byte_array += self.float2millisecond_bytes(schedule_list[i][\"many\"], 4)\n            byte_array += self.float2millisecond_bytes(schedule_list[i][\"keep\"], 4)\n        md5_val = 
hash_md5(bytes(byte_array)).upper()\n\n if schedule_md5 != md5_val:\n print(\"MD5 Not Match, OA \", schedule_md5, \"CAL\", md5_val)\n\n script_model.create(script_id=script_id, md5_version=md5_version,\n attribute_columns=[\n ('script_name', script_name),\n (\"duration\", duration),\n (\"instruction_number\", ins_num),\n (\"md5\", md5_val),\n ])\n\n storage_file = self.get_file_name_by_md5(md5_val)\n if os.path.exists(storage_file):\n return {\n \"script_id\": 0x01,\n \"script_name\": script_name,\n \"duration\": duration,\n \"instruction_number\": ins_num,\n \"md5\": md5_val,\n \"md5_version\": md5_version\n }\n\n print(\"md5_val=\", md5_val)\n os.mknod(storage_file)\n print(\">>>>>>>>>>>>>>>>>> storage file path:\",storage_file)\n with open(storage_file, \"wb\") as f:\n f.write(bytes(byte_array))\n return {\n \"script_id\": 0x01,\n \"script_name\": script_name,\n \"duration\": duration,\n \"instruction_number\": ins_num,\n \"md5\": md5_val,\n \"md5_version\": md5_version\n }\n # return byte_array\n\n RequestScriptID = 0\n\n def get_script_info_from_cache(self, script_id):\n self.RequestScriptID = script_id\n cache_key = self.assemble_script_info_cache_key(script_id)\n val = self.rds.get(cache_key)\n if val is None:\n return None\n ret = eval(val)\n return ret\n\n def get_script_info(self, script_id):\n self.RequestScriptID = script_id\n val = self.get_script_info_from_cache(script_id)\n if val is None:\n downloading_key = self.assemble_downloading_cache_key(script_id)\n is_downloading = self.rds.get(downloading_key)\n if is_downloading is None:\n th = threading.Thread(target=self.thread_download_file)\n th.start()\n th.join()\n return self.get_script_info_from_cache(script_id)\n else:\n try_times = 5\n while try_times > 0:\n try_times -= 1\n time.sleep(1)\n val = self.get_script_info_from_cache(script_id)\n if val is not None:\n return val\n return None\n else:\n return val\n\n def thread_download_file(self):\n cache_key = self.assemble_script_info_cache_key(self.RequestScriptID)\n try:\n rr = self.download_script_from_server(self.RequestScriptID)\n self.rds.setex(cache_key, 600, str(rr))\n except Exception as ex:\n print(ex)\n raise ex\n\n\n# if __name__ == \"__main__\":\n#\n# print(Config['app_path'])\n# # ddd = DownloadScriptToSync()\n# # ddd.float2millisecond_bytes(\"8.174 \", 4)\n# # v = \"8.174\"\n# # print(int(round(float(v) * 1000)), v)\n","sub_path":"app/core/download_script_from_oa.py","file_name":"download_script_from_oa.py","file_ext":"py","file_size_in_byte":7133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"144060027","text":"import os;\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\",\"First_project.settings\")\n\nimport django\ndjango.setup()\n\n## fake populator script\n\nimport random\nfrom hello_world.models import AccessRecord ,Webpage,Topic\nfrom faker import Faker\n\nfakegen=Faker()\n\ntopic=[\"Search\",\"Social\",\"Marketplace\",\"News\",\"Game\",\"Streaming\"]\n\ndef add_topic():\n t=Topic.objects.get_or_create(top_name=random.choice(topic))[0]\n t.save()\n return t\ndef populate(N=20):\n\n for entry in range(N):\n # get topic\n top= add_topic()\n\n # fake data\n fake_url=fakegen.url()\n fake_date=fakegen.date()\n fake_name=fakegen.company()\n\n # new fake web page entry\n webpg=Webpage.objects.get_or_create(\n topic=top,# you are passing a topic obj not a name due to decl of f.key\n url=fake_url,\n name=fake_name)[0]\n # new fake acess records\n acc_rec=AccessRecord.objects.get_or_create(\n name=webpg# 
passing a webpg obj, not a string, since it is a foreign key\n            ,date=fake_date)[0]\n\nif __name__==\"__main__\":\n    print(\"populating !!\")\n    populate()\n    print(\"Done the Deed!\")\n","sub_path":"Notes&CodeTesting/web/Django/First_project/Populate_hello_world_data.py","file_name":"Populate_hello_world_data.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"229101508","text":"# encoding utf-8\n'''\n@Author: william\n@Description:\n@time:2020/6/19 16:51\n'''\nimport torch.nn as nn\nfrom torch.nn.utils import weight_norm\n\n\nclass Chomp1d(nn.Module):\n    def __init__(self, chomp_size):\n        super(Chomp1d, self).__init__()\n        self.chomp_size = chomp_size\n\n    def forward(self, x):\n        \"\"\"\n        This is really just a cropping module: it trims off the extra padding.\n        \"\"\"\n        return x[:, :, :-self.chomp_size].contiguous()\n\n\nclass TemporalBlock(nn.Module):\n    \"\"\"\n    Equivalent to a residual block\n    :param n_inputs: int, number of input channels\n    :param n_outputs: int, number of output channels\n    :param kernel_size: int, convolution kernel size\n    :param stride: int, stride, usually 1\n    :param dilation: int, dilation factor\n    :param padding: int, padding size\n    :param dropout: float, dropout rate\n    \"\"\"\n    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):\n        super(TemporalBlock, self).__init__()\n\n        self.c = nn.Conv2d(in_channels=5, out_channels=n_outputs, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation)\n        # self.conv1 = weight_norm(nn.Conv1d(in_channels=n_inputs, out_channels=n_outputs, kernel_size= kernel_size, stride=stride, padding=padding, dilation=dilation))\n        self.conv1 = weight_norm(nn.Conv2d(in_channels=5, out_channels=5, kernel_size= kernel_size, stride=stride, padding=padding, dilation=dilation))\n        self.chomp1 = Chomp1d(padding)\n        self.relu1 = nn.Tanh()\n        self.dropout1 = nn.Dropout(dropout)\n\n        self.conv2 = weight_norm(nn.Conv2d(in_channels=5, out_channels=5, kernel_size=kernel_size,\n                                           stride=stride, padding=padding, dilation=dilation))\n        # self.conv2 = weight_norm(nn.Conv1d(in_channels=n_outputs, out_channels=n_outputs, kernel_size=kernel_size,\n        #                                    stride=stride, padding=padding, dilation=dilation))\n        self.chomp2 = Chomp1d(padding)\n        self.relu2 = nn.Tanh()\n        self.dropout2 = nn.Dropout(dropout)\n\n        self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,\n                                 self.conv2, self.chomp2, self.relu2, self.dropout2)\n        #self.downsample = nn.Conv1d(in_channels=5, out_channels=n_outputs, kernel_size=2 * padding + 1, padding=padding)\n        self.downsample = nn.Conv2d(in_channels=5, out_channels=5, kernel_size=(2 * padding + 1, 1), padding=padding)\n        self.relu = nn.ReLU()\n        self.init_weights()\n\n    def init_weights(self):\n        \"\"\"\n        Initialize the parameters\n        :return:\n        \"\"\"\n        self.conv1.weight.data.normal_(0, 0.01)\n        self.conv2.weight.data.normal_(0, 0.01)\n        if self.downsample is not None:\n            self.downsample.weight.data.normal_(0, 0.01)\n\n    def forward(self, x):\n        \"\"\"\n        :param x: size of (Batch, input_channel, seq_len)\n        :return:\n        \"\"\"\n        #x=x.permute(0,3,1,2)\n        # a = self.conv1(x)\n        # a = self.chomp1(a)\n        # a = self.relu1(a)\n        # a = self.dropout1(a)\n        # b = self.conv2(a)\n        # b = self.chomp2(b)\n        # b = self.relu2(b)\n        # out = self.dropout2(b)\n        out = self.net(x)\n        res = x if self.downsample is None else self.downsample(x)\n        return self.relu(out + res)\n\n\nclass TemporalConvNet(nn.Module):\n    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):\n        \"\"\"\n        The TCN structure given in the paper supports the case where each time step is a single number, i.e. a plain sequence, very well.\n        For the case where each time step is a vector (a 1-D structure), one can just about cope by splitting the vector into several input channels for that time step;\n        the case where each time step is a matrix or a higher-dimensional image is much harder to handle.\n        :param num_inputs: int, number of input channels\n        :param num_channels: list, the hidden_channel count of each layer, e.g. [25,25,25,25] means 4 hidden layers with 25 hidden channels each\n        :param kernel_size: int, convolution kernel size\n        :param dropout: float, dropout rate\n        \"\"\"\n        super(TemporalConvNet, self).__init__()\n        layers = []\n        num_levels = len(num_channels)\n        for i in range(num_levels):\n            dilation_size = 2 ** i\n            in_channels = num_channels[i] if i == 0 else num_channels[i-1]\n            # in_channels = out_channels if i == 0 else num_channels[i-1]\n            # in_channels = 1\n            out_channels = num_channels[0]\n            layers += [TemporalBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,\n                                     padding=(kernel_size-1) * dilation_size, dropout=dropout)]\n\n        self.network = nn.Sequential(*layers)\n\n    def forward(self, x):\n        \"\"\"\n        The input x is laid out differently from an RNN: an RNN usually takes size (Batch, seq_len, channels) or (seq_len, Batch, channels).\n        Here seq_len is placed after channels, so the data of all time steps is strung together as the Conv1d input size, letting the convolution operate across time steps.\n        A clever design.\n        :param x: size of (Batch, input_channel, seq_len)\n        :return: size of (Batch, output_channel, seq_len)\n        \"\"\"\n        return self.network(x)\n","sub_path":"TCN.py","file_name":"TCN.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"610719737","text":"import shutil\nimport os\nvideo_source_path = \"D:/graduation_project/workspace/dataset/HMDB51/\"\ntxt_source_path = \"D:/graduation_project/workspace/dataset/HMDB51_tt_txt/\"\ntarget_root_path = \"D:/graduation_project/workspace/dataset/HMDB51/video/\"\n\nif not os.path.exists(target_root_path):\n    os.makedirs(target_root_path)\n\nfile_list = os.listdir(txt_source_path)\ntype = 0\nDict = {}\ntt_count = {\"train1\": 0, \"test1\": 0, \"train2\": 0, \"test2\": 0, \"train3\": 0, \"test3\": 0}\nvDict = {}\nvDict2 = {}\nvCount = -1\nsplit2num = {\"train1\":0, \"train2\":1, \"train3\":2, \"test1\":3, \"test2\":4, \"test3\":5}\nfor file in file_list:\n    tmp = file.split(\"_test_\")\n    tmp_video_path = video_source_path + tmp[0] + '/'\n\n    if tmp[0] not in Dict.keys():\n        type += 1\n        Dict[tmp[0]] = type\n\n    type_num = Dict[tmp[0]]\n    num = tmp[1][5]\n\n    file_to_read = open(txt_source_path + file, 'r')\n\n    content = file_to_read.readline()\n    while content:\n        content = content.split(' ')\n        if content[0] not in vDict.keys():\n            vCount += 1\n            name = str(vCount) + '_' + str(type_num) + '.avi'\n            vDict[content[0]] = name\n            vDict2[name] = []\n            video_path = tmp_video_path + content[0]\n            target_path = target_root_path + name\n            shutil.copy(video_path, target_path)\n\n        n = vDict[content[0]]\n        if content[1] in ('1', '2'):\n            split = \"train\" + num if content[1] == '1' else \"test\" + num\n            target_name = str(tt_count[split]) + '_' + str(type_num) + '_' + str(split2num[split])\n            vDict2[n].append(target_name)\n            tt_count[split] += 1\n        content = file_to_read.readline()\n\n    file_to_read.close()\n\n# Dict = sorted(Dict.items(), key=lambda d: d[1])\n# file_to_write = open(target_root_path + 'map.txt', 'w')\n# for k in Dict.keys():\n#     file_to_write.write(k + ' ' + str(Dict[k]) + '\\n')\n# file_to_write.close()\n\n\nfile_to_write = open(video_source_path + 'vDict.txt', 'w')\nfor k in vDict.keys():\n    print(k, vDict[k], file=file_to_write)\nfile_to_write.close()\nfile_to_write = open(video_source_path + 'vDict2.txt', 'w')\nfor k in vDict2.keys():\n    print(k, ','.join([i for i in vDict2[k]]), file=file_to_write)\nfile_to_write.close()\n\n# Dict = sorted(Dict.items(), key=lambda e: e[1], reverse=False)\nfile_to_write = open(video_source_path + 'map.txt', 
'w')\nfor k in Dict.keys():\n print(k, str(Dict[k]), file=file_to_write)\nfile_to_write.close()","sub_path":"submit/0_HMDB51.py","file_name":"0_HMDB51.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"404931726","text":"\n\nfrom xai.brain.wordbase.nouns._pesticide import _PESTICIDE\n\n#calss header\nclass _PESTICIDES(_PESTICIDE, ):\n\tdef __init__(self,): \n\t\t_PESTICIDE.__init__(self)\n\t\tself.name = \"PESTICIDES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"pesticide\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_pesticides.py","file_name":"_pesticides.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"584078653","text":"__author__ = 'Tushar Makkar '\n\nimport argparse\nimport collections\nimport json\nimport logging\nimport os\nimport os.path\nimport sys\n\nfrom enum import Enum\n\n\nclass MessageCategory(Enum):\n FACEBOOK = 1\n WHATSAPP = 2\n INSTAGRAM = 3\n HIKE = 4\n\n\nclass Message:\n def __init__(self, content, sender):\n self._content = content\n self._sender = sender\n\n def get_content(self):\n return self._content\n\n def get_sender(self):\n return self._sender\n\n\nclass MessageMedium:\n def __init__(self, file_name):\n self._file_name = file_name\n\n def _read_file(self):\n extension = os.path.splitext(self._file_name)[1][1:].strip()\n if extension == 'txt':\n with open(self._file_name) as f:\n return f.read()\n elif extension == 'json':\n with open(self._file_name) as f:\n return json.load(f)\n return None\n\n def convert_to_messages(self, participants=None):\n return\n\n\nclass FacebookMessageMedium(MessageMedium):\n def convert_to_messages(self, participants=None):\n data = self._read_file()\n actual_data = data['messages']\n list_of_messages = []\n for i in actual_data:\n if i.get('content'):\n list_of_messages.append(Message(i['content'], i['sender_name']))\n return list_of_messages\n\n\nclass WhatsappMessageMedium(MessageMedium):\n def convert_to_messages(self, participants=None):\n data = self._read_file()\n chat_data = data.split('\\n')[1:]\n participants = set()\n for data in chat_data:\n if len(participants) >= 2:\n break\n participants.add(data.split(' - ')[-1].split(':')[0])\n participants = list(participants)\n participants = [i + ': ' for i in participants]\n list_of_messages = []\n for data in chat_data:\n sender = None\n for i in participants:\n if i in data:\n sender = i\n break\n if data and sender:\n list_of_messages.append(Message(data.split(sender)[-1], sender[:-2]))\n return list_of_messages\n\n\nclass InstagramMessageMedium(MessageMedium):\n def convert_to_messages(self, participants=None):\n data = self._read_file()\n actual_data = None\n for i in data:\n if set(participants) == set(i['participants']):\n actual_data = i\n break\n list_of_messages = []\n for i in actual_data['conversation']:\n if i.get('text'):\n list_of_messages.append(Message(i.get('text'), i.get('sender')))\n return list_of_messages\n\n\nclass HikeMessageMedium(MessageMedium):\n def convert_to_messages(self, participants=None):\n data = self._read_file()\n line_data = data.split('\\n')\n chat_data = line_data[1:]\n participants = ['me-', line_data[0].split('Chat with ')[-1] + '-']\n list_of_messages = []\n for data in chat_data:\n sender = None\n for i in participants:\n if i in data:\n sender = i\n break\n if data and sender:\n 
list_of_messages.append(Message(data.split(sender)[-1], sender[:-1]))\n return list_of_messages\n\n\nclass MessageWordCloud:\n def __init__(self, logger, data_map):\n self._logger = logger\n self._data_map = data_map\n self._class_map = {\n MessageCategory.WHATSAPP: WhatsappMessageMedium,\n MessageCategory.HIKE: HikeMessageMedium,\n MessageCategory.FACEBOOK: FacebookMessageMedium,\n MessageCategory.INSTAGRAM: InstagramMessageMedium\n }\n self._stopwords = []\n self._get_stopwords()\n\n def _get_stopwords(self):\n data_files = ['assets/' + i for i in os.listdir('assets')]\n for i in data_files:\n with open(i) as f:\n self._stopwords.extend(f.readlines())\n self._stopwords = set([i.strip().lower() for i in self._stopwords])\n\n def convert_to_frequency(self, all_messages, msg_category, chop_off_num):\n data_dict = collections.defaultdict(lambda: collections.defaultdict(int))\n min_length = 10 ** 10\n for data_msg_category in all_messages:\n if msg_category == data_msg_category or msg_category == 'all':\n for msg in all_messages[data_msg_category]:\n actual_message = msg.get_content()\n split_message = actual_message.split()\n for word in split_message:\n if len(word) > 2 and word.lower() not in self._stopwords and word.isalnum():\n data_dict[data_msg_category.name][word.lower()] += 1\n data_dict[data_msg_category.name] = sorted(data_dict[data_msg_category.name].items(), key=lambda x: -1 * x[1])\n data_dict[data_msg_category.name] = [i for i in data_dict[data_msg_category.name] if i[1] > chop_off_num]\n min_length = min(len(data_dict[data_msg_category.name]), min_length)\n for data_msg_category in data_dict:\n data_dict[data_msg_category] = data_dict[data_msg_category][:min_length]\n return data_dict\n\n @staticmethod\n def _make_js(freq_map):\n data_list = []\n for data in freq_map:\n for ind_word_cnt in freq_map[data]:\n data_list.append({\"x\": str(ind_word_cnt[0]), \"value\": ind_word_cnt[1], \"category\": data})\n main_string = \"let data = %s\" % data_list\n main_string = main_string.replace(\"'\", '\"')\n with open('final_data.js', 'w') as f:\n f.write(main_string)\n\n def build_final_data_js_file(self, participants_ig_name, freq_chop_off_num=2):\n all_messages = {}\n for type_of_message in self._class_map:\n self._logger.info(\"Getting messages for %s\" % type_of_message)\n all_messages[type_of_message] = self._class_map[type_of_message](self._data_map[type_of_message]).convert_to_messages(\n participants=participants_ig_name)\n freq_map = self.convert_to_frequency(all_messages, 'all', freq_chop_off_num)\n self._make_js(freq_map)\n\n\nif __name__ == '__main__':\n _logger = logging.getLogger()\n _logger.setLevel(logging.DEBUG)\n _ch = logging.StreamHandler(sys.stdout)\n _ch.setFormatter(logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"))\n _logger.addHandler(_ch)\n args = argparse.ArgumentParser(description='Argument parser for wordcloud')\n args.add_argument('--ig_names', help='Instagram name list', type=str, required=True, nargs=2)\n args.add_argument('--folder', help='Folder where files are present', type=str, required=True)\n args = args.parse_args()\n _data_map = {\n MessageCategory.FACEBOOK: os.path.join(args.folder, 'fb.json'),\n MessageCategory.HIKE: os.path.join(args.folder, 'hike.txt'),\n MessageCategory.INSTAGRAM: os.path.join(args.folder, 'ig.json'),\n MessageCategory.WHATSAPP: os.path.join(args.folder, 'wp.txt')\n }\n MessageWordCloud(_logger, _data_map).build_final_data_js_file(args.ig_names, 
0)\n","sub_path":"message_wordcloud.py","file_name":"message_wordcloud.py","file_ext":"py","file_size_in_byte":7241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"569175761","text":"\nclass Car:\n\tdef __init__ (self, idx):\n\t\tself.idx = idx\n\n\t\tself.x = 0\n\t\tself.y = 0\n\t\tself.t = 0\n\n\tdef get_nearest_ride (self, rides, max_time, bonus):\n\t\tmin_time = 1000000\n\t\tmin_modified_time = 1000000\n\t\tmin_ride = -1\n\t\tmin_idx = -1\n\n\t\tfor list_idx, ride in enumerate(rides):\n\t\t\tif self.t + self.dist(ride) > ride.last_call:\n\t\t\t\tcontinue\n\n\t\t\tstart_time = max(ride.start_time, self.t + self.dist(ride))\n\n\t\t\tif start_time + ride.duration > max_time:\n\t\t\t\tcontinue\n\n\t\t\tmodified_time = start_time - bonus if start_time == ride.start_time else start_time\n\t\t\t\n\t\t\tif modified_time < min_modified_time:\n\t\t\t\tmin_time = start_time\n\t\t\t\tmin_ride = ride\n\t\t\t\tmin_idx = list_idx\n\t\t\t\tmin_modified_time = modified_time\n\n\t\treturn min_time, min_ride, min_idx\n\n\n\tdef dist (self, ride):\n\t\treturn abs(self.x - ride.start[0]) + abs(self.y - ride.start[1])\n\n\t\t\t\n\t\t\t\n\n","sub_path":"car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"147314796","text":"#!/data/data/com.termux/files/usr/bin/env python\nfrom math import sqrt\nimport argparse\n\ndef roots(a,b,c):\n disc=4*a*c\n if b**2 >= disc:\n x1=(sqrt((b**2-disc))-b)/2\n x2=((-1*sqrt((b**2-disc))))/2\n return x1,x2\n else:\n return\nif __name__=='__main__':\n ps=argparse.ArgumentParser()\n ps.add_argument('-a',action='store',dest='a')\n ps.add_argument('-b',action='store',dest='b')\n ps.add_argument('-c',action='store',dest='c')\n pr=ps.parse_args()\n a,b,c=float(pr.a),float(pr.b),float(pr.c)\n print(\"The roots of the function :\")\n print(\"%f x² + %f x + %f = 0\\n is\\n\"%(a,b,c))\n print(roots(a,b,c))\n\n","sub_path":"quadratic_roots.py","file_name":"quadratic_roots.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"236621603","text":"\"\"\"mycrm URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\n\nfrom master import views as mas \n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^login/$', mas.try_to_login, name='login'),\n url(r'^logout/$', mas.try_to_logout, name='logout'), \n\n url(r'^$', mas.go_home, name='user_homepage'),\n url(r'^primary_groups/$', mas.list_of_prim_groups, name='prim_groups'),\n url(r'^primary_group_details/(?P\\d+)/$', mas.prim_group_details, name='prim_group_detail'), \n url(r'^tag_list/$', mas.show_all_tags, name='tag_list'),\n url(r'^tag_details/(?P\\d+)/$', mas.tag_details, name='tag_detail'),\n #search by city / city deepdive\n url(r'^city_list/$', mas.list_of_cities, name='city_list'),\n #url(r'^city_details/(?P\\w+)$', mas.city_deepdive, name='city_detail2'), \n url(r'^city_details/(?P[\\w ]+)$', mas.city_deepdive, name='city_detail2'), \n #link for contact deepdive \n url(r'^contact_details/(?P\\d+)/$', mas.indiv_contact_details, name='cont_detail'),\n #edit master details\n url(r'^edit_master/(?P\\d+)/$', mas.edit_master_entry, name='edit_master'),\n url(r'^edit_master_next_date/(?P\\d+)/$', mas.edit_next_contact_in_master, name='edit_master_next_date'),\n url(r'^edit_master_starred/(?P\\d+)/$', mas.edit_starred_in_master, name='edit_master_starred'),\n url(r'^edit_master_prospect/(?P\\d+)/$', mas.edit_prospect_in_master, name='edit_master_prospect'),\n #add a new master entry\n url(r'^add_master/$', mas.add_master_entry, name='add_master'),\n #url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls))\n url(r'^add_touchpoint/(?P\\d+)/$', mas.add_contact_touchpoint, name='add_touchpoint'),\n url(r'^edit_touchpoint/(?P\\d+)/(?P\\d+)/$', mas.edit_contact_touchpoint, name='edit_touchpoint'),\n url(r'^search_contacts/$', mas.search_master_list, name='search_master'),\n #calendar view\n url(r'^forecast_past_due/$', mas.calendar_forecast_past_due, name='cal_view_pastdue'),\n url(r'^forecast/$', mas.calendar_forecast_today, name='cal_view_today'),\n url(r'^forecast_plus1/$', mas.calendar_forecast_plus1, name='cal_view_todayplus1'),\n url(r'^forecast_plus2/$', mas.calendar_forecast_plus2, name='cal_view_todayplus2'),\n url(r'^forecast_beyond/$', mas.calendar_forecast_beyond, name='cal_beyond_that'),\n url(r'^forecast_all/$', mas.calendar_forecast_all, name='cal_all'),\n url(r'^forecast_all_by_imp/$', mas.calendar_forecast_all_by_import, name='cal_all_by_imp'),\n #list of starred folks \n url(r'^starred/$', mas.list_of_starred, name='list_starred'),\n url(r'^prospects/$', mas.list_of_prospects, name='list_prospects'),\n #dateTimeViewBootstrap3\n #url(r'^example/$', mas.dateTimeViewBootstrap3, name='exp'),\n #url(r'^test/$', mas.testview2, name='test_view'),\n]\n\n","sub_path":"src/mycrm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"60305105","text":"import argparse\nimport datetime\nimport json\n\nfrom country.models import City\nfrom flight.models import Flight\nfrom meta_config import IMPORTER_DATA_DIRNAME\n\n\ndef flights_storage(date):\n with open(f'{IMPORTER_DATA_DIRNAME}/flights_data/flights_data{date}.json', 'r+', encoding='utf-8') as f:\n data = json.loads(f.read())\n \n for line in data:\n dept_city = line['dept_city']\n if City.objects.filter(code=dept_city).count() == 0:\n dept_city = None\n else:\n dept_city = 
City.objects.filter(code=dept_city).get()\n arri_city = line['arri_city']\n if City.objects.filter(code=arri_city).count() == 0:\n arri_city = None\n else:\n arri_city = City.objects.filter(code=arri_city).get()\n kwargs = {'code': line['code'], 'dept_time': line['dept_time'], 'dept_city': dept_city, 'arri_time': line['arri_time'], 'arri_city': arri_city, 'condition': line['condition']}\n Flight.objects.create(**kwargs)\n # except:\n # print('插入新闻数据错误')\n\n\ndef flight_import():\n parser = argparse.ArgumentParser(description='Flight-Spider')\n parser.add_argument('--date', required=False, type=str)\n args = parser.parse_args()\n if args.date:\n flights_storage(args.date)\n else:\n flights_storage(datetime.datetime.now().strftime('%Y-%m-%d'))\n","sub_path":"spiders/flight_importer.py","file_name":"flight_importer.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"48214340","text":"import socket\n\nHOST = 'localhost' # The server's hostname or IP address\nPORT = 50000 # The port used by the server\n\n\ndef start_select_client():\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect((HOST, PORT))\n message = b'Hello, world'\n print('Sending', repr(message), \"\\n\")\n s.sendall(message)\n data = s.recv(1024)\n print('Received', repr(data))\n","sub_path":"tcp/select/SelectClient.py","file_name":"SelectClient.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"397077564","text":"import os\n\nimport pytest\n\nfrom squeak.core import HASH_LENGTH\nfrom squeak.core.signing import CSigningKey\nfrom squeak.core.signing import CSqueakAddress\nfrom squeak.core.signing import CSqueakAddressError\nfrom squeak.core.signing import CVerifyingKey\nfrom squeak.core.signing import PUB_KEY_LENGTH\n\n\ndef make_hash():\n return os.urandom(HASH_LENGTH)\n\n\nclass TestSignVerify(object):\n\n def test_sign_verify(self):\n signing_key = CSigningKey.generate()\n verifying_key = signing_key.get_verifying_key()\n\n data = make_hash()\n signature = signing_key.sign(data)\n\n assert verifying_key.verify(data, signature)\n\n def test_serialize_deserialize_verifying_key(self):\n signing_key = CSigningKey.generate()\n verifying_key = signing_key.get_verifying_key()\n\n serialized = bytes(verifying_key)\n deserialized = CVerifyingKey(serialized)\n serialized2 = bytes(deserialized)\n deserialized2 = CVerifyingKey(serialized2)\n\n data = make_hash()\n signature = signing_key.sign(data)\n\n assert verifying_key.verify(data, signature)\n assert deserialized2.verify(data, signature)\n assert len(serialized2) == PUB_KEY_LENGTH\n\n def test_serialize_deserialize_signing_key(self):\n signing_key = CSigningKey.generate()\n verifying_key = signing_key.get_verifying_key()\n\n key_data = str(signing_key)\n deserialized_signing_key = CSigningKey(key_data)\n\n data = make_hash()\n signature = deserialized_signing_key.sign(data)\n\n assert verifying_key.verify(data, signature)\n\n def test_sign_verify_other_data(self):\n signing_key = CSigningKey.generate()\n verifying_key = signing_key.get_verifying_key()\n\n data = make_hash()\n data2 = make_hash()\n signature = signing_key.sign(data)\n\n assert not verifying_key.verify(data2, signature)\n\n def test_address_to_pubkey(self):\n signing_key = CSigningKey.generate()\n verifying_key = signing_key.get_verifying_key()\n\n address = 
CSqueakAddress.from_verifying_key(verifying_key)\n pubkey_script = address.to_scriptPubKey()\n\n address_from_script = CSqueakAddress.from_scriptPubKey(pubkey_script)\n\n assert address_from_script == address\n\n def test_address_to_string(self):\n signing_key = CSigningKey.generate()\n verifying_key = signing_key.get_verifying_key()\n\n address = CSqueakAddress.from_verifying_key(verifying_key)\n address_str = str(address)\n\n address_from_str = CSqueakAddress(address_str)\n\n assert address_from_str == address\n assert isinstance(address, CSqueakAddress)\n assert isinstance(address_from_str, CSqueakAddress)\n\n def test_address_to_pubkey_invalid(self):\n with pytest.raises(CSqueakAddressError):\n CSqueakAddress.from_scriptPubKey(b'')\n","sub_path":"tests/core/test_signing.py","file_name":"test_signing.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"125675272","text":"# -*- coding: utf-8 -*-\n\"\"\"\n著作權所有 (C) 民國103年 意傳文化科技\n開發者:薛丞宏\n網址:http://意傳.台灣\n語料來源:請看各資料庫內說明\n\n本程式乃自由軟體,您必須遵照SocialCalc設計的通用公共授權(Common Public Attribution License, CPAL)來修改和重新發佈這一程式,詳情請參閱條文。授權大略如下,若有歧異,以授權原文為主:\n\t1.得使用、修改、複製並發佈此程式碼,且必須以通用公共授權發行;\n\t2.任何以程式碼衍生的執行檔或網路服務,必須公開該程式碼;\n\t3.將此程式的原始碼當函式庫引用入商業軟體,且不需公開非關此函式庫的任何程式碼\n\n此開放原始碼、共享軟體或說明文件之使用或散佈不負擔保責任,並拒絕負擔因使用上述軟體或說明文件所致任何及一切賠償責任或損害。\n\n��灣言語工具緣起於本土文化推廣與傳承,非常歡迎各界用於商業軟體,但希望在使用之餘,能夠提供建議、錯誤回報或修補,回饋給這塊土地。\n\n感謝您的使用與推廣~~勞力!承蒙!\n\"\"\"\nfrom 臺灣言語工具.解析整理.文章粗胚 import 文章粗胚\nfrom 臺灣言語工具.解析整理.拆文分析器 import 拆文分析器\nfrom 臺灣言語工具.音標系統.閩南語.教會羅馬字音標 import 教會羅馬字音標\nfrom 舊臺灣言語工具.資料庫.資料庫連線 import 資料庫連線\nfrom 臺灣言語工具.解析整理.物件譀鏡 import 物件譀鏡\nfrom 臺灣言語工具.基本元素.公用變數 import 分字符號\nfrom 臺灣言語工具.基本元素.公用變數 import 分詞符號\nimport cProfile\nfrom 臺灣言語工具.解析整理.轉物件音家私 import 轉物件音家私\nimport gzip\n\nclass 匯出數位典藏文本數位字對齊:\n\t到逝 = True\n\t揣數位典藏文本段資料庫 = 資料庫連線.prepare(\n\t\t'SELECT \"流水號\",\"時代\",\"年\",\"類\",\"類二\",\"漢羅文\",\"全羅文\",\"無齊記號\"' + \n\t\t'FROM \"台語文數位典藏\".\"改過字資料\" WHERE \"流水號\">=1 ORDER BY \"流水號\"')\n\tdef __init__(self):\n\t\tself.粗胚 = 文章粗胚()\n\t\tself.分析器 = 拆文分析器()\n\t\tself.家私=轉物件音家私()\n\t\t譀鏡 = 物件譀鏡()\n\t\tself.有問題 = 0\n\t\t漢羅資料=[]\n\t\t音標資料=[]\n\t\tfor 流水號, 時代, 年, 類, 類二, 漢羅文, 全羅文, 無齊記號 in self.揣數位典藏文本段資料庫():\n\t\t\tprint(流水號)\n\t\t\t漢羅文章物件=self.轉物件而且標準化(漢羅文.strip())\n\t\t\t全羅文章物件=self.轉物件而且標準化(全羅文.strip())\n\t\t\t漢羅資料.extend(\n\t\t\t\t譀鏡.看型(漢羅文章物件,\n\t\t\t\t\t物件分字符號=分詞符號, 物件分詞符號=分詞符號)\n\t\t\t\t.split('\\n'))\n\t\t\t音標資料.extend(\n\t\t\t\t譀鏡.看型(全羅文章物件,\n\t\t\t\t\t物件分字符號=分字符號, 物件分詞符號=分詞符號)\n\t\t\t\t.split('\\n'))\n\t\t\tif len(漢羅資料) != len(音標資料):\n\t\t\t\tprint(流水號,'漢羅全羅無對齊')\n# \t\t\tprint('@@')\n# \t\t\tprint(全羅文)\n\t\t漢羅檔案 = gzip.open('臺語文數位典藏漢羅文.txt', 'wt')\n\t\tfor 逝 in 漢羅資料:\n\t\t\t逝=逝.strip()\n\t\t\tif 逝!='':\n\t\t\t\tprint(逝, file=漢羅檔案)\n\t\t漢羅檔案.close()\n\t\t全羅檔案 = gzip.open('臺語文數位典藏全羅文.txt', 'wt')\n\t\tfor 逝 in 音標資料:\n\t\t\t逝=逝.strip()\n\t\t\tif 逝!='':\n\t\t\t\tprint(逝, file=全羅檔案)\n\t\t全羅檔案.close()\n\t\tprint(self.有問題)\n\t\t\t\n\tdef 轉物件而且標準化(self,閩南語):\n\t\ttry:\n\t\t\t處理了全羅文 = self.粗胚.建立物件語句前處理減號(教會羅馬字音標, 閩南語)\n\t\t\t章物件 = self.分析器.建立章物件(處理了全羅文)\n\t\t\t標準章物件=self.家私.轉做標準音標(教會羅馬字音標, 章物件)\n\t\t\treturn 標準章物件\n\t\texcept Exception as 問題:\n# \t\t\t\tprint(全羅文)\n\t\t\tprint(問題)\n\t\t\tself.有問題 += 1\n\t\t\tprint('閩南語')\n\t\t\tprint(閩南語)\n\nif __name__ == 
'__main__':\n\tcProfile.run('匯出數位典藏文本數位字對齊()')\n","sub_path":"舊臺灣言語工具/資料佮語料匯入整合/數位典藏/匯出數位典藏文本數位字對齊.py","file_name":"匯出數位典藏文本數位字對齊.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"272572630","text":"from dbconnect import connection\nimport datetime\nfrom flask import session\n\nHTML_TEMPLATE =\"\"\"\n{% extends \"header.html\" %}\n{% block body %}\n
<ul>\n<li>Total Number of users: {{x}}</li>\n<li>Total Number of Animes: {{y}}</li>\n<li>Total Number of genres of Animes available: {{z}}</li>\n<li>{{STR}}</li>\n</ul>
\n{% endblock %}\n\"\"\"\n\ndef make_report():\n try:\n c, conn = connection()\n x = c.execute(\"SELECT * FROM User\")\n y = c.execute(\"SELECT * FROM ANIME\")\n z = c.execute(\"SELECT DISTINCT GENRE FROM ANIME_GENRE\")\n userId = str(session['username'])\n c.close()\n conn.close()\n STR = \"User: \" + userId + \" Generated Report at: \" + str(datetime.datetime.now())\n\n filename = \"report.html\"\n savePath ='/home/acesps/PycharmProjects/Anime-Database/templates/' + filename\n saveData = (HTML_TEMPLATE.replace(\"{{x}}\",str(x)).replace(\"{{y}}\",str(y)).replace(\"{{z}}\",str(z)).replace(\"{{STR}}\",STR))\n template_save = open(savePath, \"w\")\n template_save.write(saveData)\n template_save.close()\n except Exception as e:\n print(str(e))\n return\n\n","sub_path":"report_making.py","file_name":"report_making.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"92752688","text":"from django.core.validators import EmailValidator\n\nfrom .email import Email\n\n\nclass Login(Email.LoginView):\n template_name = 'otp/edu_email.html'\n\n\nclass EduEmailValidator(EmailValidator):\n def __init__(self, blacklist, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if isinstance(blacklist, str):\n blacklist = [blacklist]\n self.blacklist = set(blacklist)\n\n def validate_domain_part(self, domain_part):\n if domain_part in self.blacklist:\n return False\n domain_part = domain_part.lower()\n result = domain_part.endswith((\n '.edu.cn',\n '.edu.hk',\n '.edu.mo',\n '.edu.tw',\n '.edu.my',\n ))\n return result\n\n\nclass GetChallenge(Email.GetChallengeView):\n identity_validator = EduEmailValidator([\n 'hnu.edu.cn',\n 'smail.nju.edu.cn',\n 'njust.edu.cn',\n 'sjtu.edu.cn',\n 'std.uestc.edu.cn',\n 'ustc.edu.cn',\n 'mail.ustc.edu.cn',\n 'zju.edu.cn',\n ])\n\n\nclass EduEmail(Email):\n name = '其他高校'\n LoginView = Login\n GetChallengeView = GetChallenge\n","sub_path":"otp/backends/edu_email.py","file_name":"edu_email.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"605989680","text":"import numpy as np\nimport sklearn.metrics as metrics\nimport matplotlib.pyplot as plt\n\n\nfrom tensorflow.keras.callbacks import ModelCheckpoint,CSVLogger,LearningRateScheduler\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import Conv1D\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import MaxPooling1D\nfrom tensorflow.keras.layers import LSTM\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import Activation\nfrom tensorflow.keras.layers import AveragePooling2D\nfrom tensorflow.keras.layers import add\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.datasets import cifar10\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom tensorflow.keras.models import Sequential\nimport os\n\n# ----- declare some constant\n\ntrain_folder = '../cropped/train'\nval_folder = '../cropped/validate'\n\noutput_folder = 'output'\nclasses = [\"cendol\", \"ice kachang\", 
\"tauhuay\", \"tausuan\"]\nbatch_size = 32\nIMG_SIZE = 300\n\ndef implt(img):\n plt.figure()\n plt.imshow(img)\n plt.axis('off')\n\nplt.style.use('ggplot') # if want to use the default style, set 'classic'\nplt.rcParams['ytick.right'] = True\nplt.rcParams['ytick.labelright']= True\nplt.rcParams['ytick.left'] = False\nplt.rcParams['ytick.labelleft'] = False\nplt.rcParams['font.family'] = 'Arial'\nmodelname = 'pre-doubleconv-addlayers2'\nseed = 7\nnp.random.seed(seed)\n\n# .............................................................................\ndatagen = ImageDataGenerator()\ntrain_it = datagen.flow_from_directory(train_folder, shuffle=True, target_size=(IMG_SIZE,IMG_SIZE), class_mode='categorical', batch_size=batch_size)\nval_it = datagen.flow_from_directory(val_folder, shuffle=True, target_size=(IMG_SIZE,IMG_SIZE), class_mode='categorical', batch_size=batch_size)\n\nfilepath = os.path.join(output_folder, modelname + \".hdf5\")\ncheckpoint = ModelCheckpoint(filepath, \n monitor='val_acc', \n verbose=0, \n save_best_only=True, \n mode='max')\n # Log the epoch detail into csv\ncsv_logger = CSVLogger(os.path.join(output_folder, modelname +'.csv'))\ncallbacks_list = [checkpoint,csv_logger]\n\n#---- model creation code\ndef createModel():\n model = Sequential()\n model.add(Conv2D(32, kernel_size = (3, 3), activation='relu', padding='same', input_shape=(IMG_SIZE, IMG_SIZE, 3)))\n model.add(Conv2D(32, kernel_size = (3, 3), activation='relu', padding='same'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Conv2D(64, kernel_size=(3,3), padding='same', activation='relu'))\n model.add(Conv2D(64, kernel_size=(3,3), padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Conv2D(64, kernel_size=(3,3), padding='same', activation='relu'))\n model.add(Conv2D(64, kernel_size=(3,3), padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Conv2D(96, kernel_size=(3,3), padding='same', activation='relu'))\n model.add(Conv2D(96, kernel_size=(3,3), padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Conv2D(96, kernel_size=(3,3), padding='same', activation='relu'))\n model.add(Conv2D(96, kernel_size=(3,3), padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Conv2D(96, kernel_size=(3,3), padding='same', activation='relu'))\n model.add(Conv2D(96, kernel_size=(3,3), padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Flatten())\n model.add(Dense(512, activation='relu'))\n model.add(Dense(512, activation='relu'))\n model.add(Dense(4, activation = 'softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n \n # define model\nmodel = createModel()\nmodel.summary()\nfrom tensorflow.keras.utils import plot_model\nmodel_file = os.path.join(output_folder, modelname + \"_model.png\")\nplot_model(model, \n to_file=model_file, \n show_shapes=True, \n show_layer_names=False,\n rankdir='TB')\n# fit model\nmodel.fit_generator(train_it, validation_data=val_it,epochs=50,callbacks=callbacks_list)","sub_path":"pre1-raw-train.py","file_name":"pre1-raw-train.py","file_ext":"py","file_size_in_byte":4714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"541531802","text":"\"\"\"\r\nMonte Carlo Tic-Tac-Toe Player\r\n\"\"\"\r\n#Must be run in codeskulptor2\r\n\r\nimport random\r\nimport poc_ttt_gui\r\nimport poc_ttt_provided as 
provided\r\n\r\n# Constants for Monte Carlo simulator\r\n# You may change the values of these constants as desired, but\r\n# do not change their names.\r\nNTRIALS = 10 # Number of trials to run\r\nSCORE_CURRENT = 1.0 # Score for squares played by the current player\r\nSCORE_OTHER = 1.0 # Score for squares played by the other player\r\n \r\n# Add your functions here.\r\ndef mc_trial (board, player):\r\n \"\"\"\r\n Runs through a whole game, with the given player making the first move.\r\n Alternates with each player making random moves until someone wins or a draw happens.\r\n Does not return anything, modifies the given board.\r\n \"\"\"\r\n mover = player\r\n while (board.check_win() == None): #plays until someone wins, or the game is a draw\r\n empties = board.get_empty_squares()\r\n selection = random.choice(empties) \r\n board.move(selection[0], selection[1], mover) #make a move into a random empty square\r\n mover = provided.switch_player(mover) #swap player\r\n\r\ndef mc_update_scores(scores, board, player):\r\n \"\"\"\r\n Scores the given board. If the given player won, each tile the player\r\n used gets a constant added to it, and each tile the loser used gets a constant\r\n subtracted from the corresponding tile in the score grid. If the given player lost\r\n then instead the player gets their scores subtracted in the score grid and the winner\r\n gets their scores added. Empty tiles get a score of 0, and if the game was a draw\r\n the score of each tile is 0. Directly modifies the score grid, does not return anything.\r\n \"\"\"\r\n rows = range(board.get_dim())\r\n cols = range(board.get_dim())\r\n current = SCORE_CURRENT\r\n other = SCORE_OTHER\r\n other_player = provided.switch_player(player)\r\n \r\n if (board.check_win() == provided.DRAW): #if the game is a draw don't add anything to scores\r\n for row in rows:\r\n for col in cols:\r\n scores[row][col] += 0\r\n else:\r\n if (board.check_win() == player): #making sure we subtract from opponent scores if the player wins\r\n other *= -1\r\n else: #making sure we subtract player scores if the player loses\r\n current *= -1\r\n for row in rows: #updating scores over the board\r\n for col in cols:\r\n if board.square(row, col) == player:\r\n scores[row][col] += current\r\n elif board.square(row, col) == other_player:\r\n scores[row][col] += other\r\n else:\r\n scores[row][col] += 0 \r\n \r\n \r\n#mc_update_scores(scores, use_board, provided.PLAYERX) #test\r\ndef get_best_move(board, scores):\r\n \"\"\"\r\n Looks over the scores of empty tiles and returns the tile with the highest score\r\n as a tuple. If there is a tie for highest score, the function randomly returns one of the\r\n tied tiles.\r\n \"\"\"\r\n empties = board.get_empty_squares()\r\n length = range(len(empties))\r\n max = scores[empties[0][0]][empties[0][1]]\r\n best_tiles = []\r\n \r\n for entry in length: #iterating over empty cells and seeing which has the highest score\r\n if scores[empties[entry][0]][empties[entry][1]] >= max:\r\n max = scores[empties[entry][0]][empties[entry][1]]\r\n for entry in length:\r\n if scores[empties[entry][0]][empties[entry][1]] == max:\r\n best_tiles.append(empties[entry]) #adding cells that have the max score to a list\r\n return random.choice(best_tiles) #randomly selecting cell among the ones with a max score\r\n \r\ndef mc_move(board, player, trials):\r\n \"\"\"\r\n Does a monte carlo simulation to determine which should be the next move. 
Does a certain number \r\n of trials, scores the moves of the trials, then chooses the highest scoring move to do. \r\n \"\"\"\r\n size = board.get_dim()\r\n counter = 0\r\n scores = [[0 for count in range(size)] for value in range(size)]\r\n while (counter < trials): #doing all the trials starting from current state of board\r\n copied_board = board.clone()\r\n mc_trial(copied_board, player)\r\n mc_update_scores(scores, copied_board, player) #scoring all the trials\r\n counter += 1\r\n return get_best_move(board, scores) #returning whichever cell had the best score\r\n\r\n# Test game with the console or the GUI. Uncomment whichever \r\n# you prefer. Both should be commented out when you submit \r\n# for testing to save time.\r\n\r\nprovided.play_game(mc_move, NTRIALS, False) \r\npoc_ttt_gui.run_gui(3, provided.PLAYERX, mc_move, NTRIALS, False)\r\n","sub_path":"TicTacToeMonteCarlo.py","file_name":"TicTacToeMonteCarlo.py","file_ext":"py","file_size_in_byte":4764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"195897908","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import load_boston\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import r2_score\n\nclass Linear_Ref:\n def __init__(self) :\n self.final_rmse = None\n self.final_r2 = None\n self.final_df_predict = None\n self.final_df_test = None\n self.final_feature_name = None\n\n boston = load_boston()\n self.df_data = pd.DataFrame(boston.data,columns=boston.feature_names)\n self.df_target = pd.DataFrame(boston.target)\n self.n = len(self.df_data)\n self.n_test = int(0.2*self.n)\n self.n_train = self.n - self.n_test\n\n self.df_target_train = self.df_target.iloc[:self.n_train].copy()\n self.df_target_test = self.df_target.iloc[self.n_train:].copy()\n\n self.reg = LinearRegression()\n self.multi_reg = LinearRegression()\n def initPolyRegDegree(self,deg):\n self.poly_lin = LinearRegression()\n self.poly = PolynomialFeatures(degree=deg)\n\n def split_train_test(self,data):\n df_train = data.iloc[:self.n_train].copy()\n df_test = data.iloc[self.n_train:].copy()\n return df_train,df_test\n\n def linear_reg(self,df_train):\n self.reg.fit(df_train,self.df_target_train)\n\n def poly_reg(self,x):\n df_train_poly_form = self.poly.fit_transform(x)\n self.poly_lin.fit(df_train_poly_form,self.df_target_train.values)\n\n def predict(self,df_test):\n return pd.DataFrame(self.reg.predict(df_test))\n\n def rmse(self,df_predict):\n mse = mean_squared_error(self.df_target_test,df_predict)\n return np.sqrt(mse)\n\n def r_sq_score(self,df_predict):\n return r2_score(self.df_target_test, df_predict,multioutput='variance_weighted')\n\n\n def plot(self,final_rmse,final_r2,final_df_predict,final_df_test,final_feature_name,reg_type):\n print('\\n\\n***************Plotting for '+reg_type+'***************')\n print('Most optimum col is: '+final_feature_name)\n print('R-Squared Value is: '+str(final_r2))\n print('RMSE Value is: '+str(final_rmse))\n plt.figure(figsize=(8,6)) #set before plotting\n plt.scatter(final_df_test,self.df_target_test,color='blue')\n plt.plot(final_df_test,final_df_predict,color='red')\n plt.title(\"Scatter Plot of test data and predicted value - \"+reg_type)\n plt.xlabel(final_feature_name)\n plt.ylabel(\"MEDV\")\n plt.show()\n\n def start_lin_reg(self) 
:\n for col in self.df_data.columns.values :\n df_x = self.df_data[col].copy().to_frame(col)\n\n df_x_train, df_x_test = self.split_train_test(df_x)\n self.linear_reg(df_x_train)\n\n df_y_predict = self.predict(df_x_test)\n rmseVal = self.rmse(df_y_predict)\n r2 = self.r_sq_score(df_y_predict)\n\n if self.final_r2 == None or (r2>self.final_r2):\n self.final_r2 = r2\n self.final_df_test = df_x_test.values\n self.final_df_predict = df_y_predict.values\n self.final_feature_name = col\n self.final_rmse = rmseVal\n self.plot(self.final_rmse,self.final_r2,self.final_df_predict,self.final_df_test,self.final_feature_name,'Linear Regression')\n\n def start_poly_reg(self,reg_type):\n df_x = (self.df_data[self.final_feature_name].copy()).to_frame(self.final_feature_name)\n df_x_train, df_x_test = self.split_train_test(df_x)\n\n self.poly_reg(df_x_train.values)\n \n x_poly = self.poly.fit_transform(df_x_test.values)\n y_predict = self.poly_lin.predict(x_poly)\n\n df_y_predict = pd.DataFrame(y_predict)\n rmseVal = self.rmse(df_y_predict)\n r2 = self.r_sq_score(df_y_predict)\n\n self.plot(rmseVal,r2,y_predict,df_x_test.values,self.final_feature_name,reg_type)\n\n def multiple_regression(self):\n print('\\n\\n***************Plotting for Multiple Refression***************')\n df_x, df_x_test_temp = self.split_train_test(self.df_data)\n df_x_cor = df_x.copy()\n df_x_cor['MEDV'] = self.df_target_train\n cor = df_x_cor.corr().abs()['MEDV']\n cor = cor.sort_values(ascending=False)\n new_cor = cor.drop(labels = ['MEDV']).head(3)\n \n df_x = pd.DataFrame()\n df_x_test = pd.DataFrame()\n \n for index, value in new_cor.items():\n df_x[index] = df_x_cor[index]\n df_x_test[index] = df_x_test_temp[index]\n \n self.multi_reg.fit(df_x,self.df_target_train)\n \n df_y_predict = pd.DataFrame(self.multi_reg.predict(df_x_test))\n \n rmseVal = self.rmse(df_y_predict)\n r2 = self.r_sq_score(df_y_predict)\n ind_var = 4; #p\n sample_size = len(df_x) #n\n \n adjusted_r2 = 1 - ((1-r2**2) * (sample_size - 1)/(sample_size-ind_var-1))\n print(\"RMSE: \",rmseVal)\n print(\"R2: \",r2)\n print(\"Adjusted R2: \",adjusted_r2)\n\nif __name__ == '__main__':\n lr = Linear_Ref()\n lr.start_lin_reg()\n lr.initPolyRegDegree(2)\n lr.start_poly_reg('Polynomial Regression - Degree 2')\n lr.initPolyRegDegree(20)\n lr.start_poly_reg('Polynomial Regression - Degree 20')\n lr.multiple_regression()\n\n","sub_path":"assignment1/linearReg.py","file_name":"linearReg.py","file_ext":"py","file_size_in_byte":5445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"475894904","text":"import requests, json, pymysql,re\r\n#获取歌手歌曲列表\r\nwhile True:\r\n #如果想翻页修改链接中的 &p=\r\n url = \"https://c.y.qq.com/soso/fcgi-bin/client_search_cp?new_json=1&cr=1&p=1&n=20&w={}\"\r\n new_url = url.format(str(input(\"请输入你要下载的歌曲或歌星:\")))\r\n req = requests.get(new_url)\r\n # print(req.text[9:-1])\r\n data = json.loads(req.text[9:-1])\r\n songs = data[\"data\"][\"song\"][\"list\"]\r\n i = 0\r\n mid_list = []\r\n title_list = []\r\n for song in songs:\r\n i+=1\r\n title = song[\"title\"]\r\n mid = song[\"mid\"]\r\n mid_list.append(mid)\r\n title_list.append(title)\r\n print(\"{0}:{1}\".format(i,title))\r\n pass\r\n num = int(input(\"请输入你要下载的歌曲序号(如:1):\"))\r\n print(num)\r\n # 拼接params并请求得到歌曲文件所有请求详情\r\n params = {\r\n \"data\": 
'{\"req\":{\"module\":\"CDN.SrfCdnDispatchServer\",\"method\":\"GetCdnDispatch\",\"param\":{\"guid\":\"5779709973\",\"calltype\":0,\"userip\":\"\"}},\"req_0\":{\"module\":\"vkey.GetVkeyServer\",\"method\":\"CgiGetVkey\",\"param\":{\"guid\":\"5779709973\",\"songmid\":[\"%s\"],\"songtype\":[0],\"uin\":\"0\",\"loginflag\":1,\"platform\":\"20\"}},\"comm\":{\"uin\":0,\"format\":\"json\",\"ct\":20,\"cv\":0}}'%mid_list[num-1]\r\n }\r\n url = \"https://u.y.qq.com/cgi-bin/musicu.fcg\"\r\n req1 = requests.get(url, params=params)\r\n data2 = json.loads(req1.text)\r\n # 得到歌曲的详情页链接并请求\r\n purl = data2[\"req_0\"][\"data\"][\"midurlinfo\"][0][\"purl\"]\r\n detail_url = \"http://111.202.85.148/amobile.music.tc.qq.com/\"+purl\r\n print(detail_url)\r\n req2 = requests.get(detail_url)\r\n with open(\"songsdata/\"+title_list[num-1]+\".mp3\", \"wb+\") as f:\r\n f.write(req2.content)\r\n y_n = str(input(\"是否继续下载歌曲,如(y/n):\"))\r\n if y_n.lower() == \"y\":\r\n continue\r\n elif y_n == 'n':\r\n break\r\n else:\r\n print(\"输入有误,请重新输入\")\r\n","sub_path":"QQmusic(xuan).py","file_name":"QQmusic(xuan).py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"515361859","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport random\nimport tensorflow as tf\nimport numpy as np\nfrom variational_encoder import VariationalEncoder\nflags = tf.app.flags\n\n# Setting random seed\nflags.DEFINE_integer(\"random_seed\", 314, \"Value of random seed\")\nflags.DEFINE_integer(\"batch_size\", 100, \"Number of examples in minibatch\")\nflags.DEFINE_integer(\"hidden_dim\", 500, \"Dimension of hidden state\")\nflags.DEFINE_integer(\"input_dim\", 784, \"Input dimension\")\nflags.DEFINE_integer(\"epochs\", 75, \"Numbers of epochs to train\")\nflags.DEFINE_integer(\"latent_dim\", 20, \"latent varialbe dimension\")\nflags.DEFINE_float(\"lr\", 0.001, \"initial learning rate\")\nflags.DEFINE_boolean(\"isTrain\", True, \"is training phase\")\nFLAGS = flags.FLAGS\ntf.set_random_seed(FLAGS.random_seed)\nrandom.seed(FLAGS.random_seed)\n\n\n\ndef main(_):\n config = FLAGS\n config.activation= tf.nn.softplus\n with tf.Session() as sess:\n vae = VariationalEncoder(sess, config)\n\n if config.isTrain:\n vae.train()\n else: \n vae.saver.restore(vae.sess, \"./checkpts/sampling3\")\n # reconstruction:\n test_example , _ = mnist.train.next_batch(100)\n test_mean = vae.reconstruct(test_example) \n f_example = open(\"./saved/MNIST_sample\",\"wb\")\n f_reconstr = open(\"./saved/MNIST_recons\",\"wb\")\n np.save(f_example, np.asarray(test_example))\n np.save(f_reconstr, np.asarray(test_mean))\n # random sample: \n random_i = vae.sample()\n f_i = open(\"./saved/random_sample\", \"wb\")\n np.save(f_i, np.asarray(random_i))\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"175753112","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 27 13:33:59 2018\n\n@author: thosvarley\n\"\"\"\nimport networkx as nx\nimport numpy as np \nimport matplotlib.pyplot as plt\ntmats = []\n\nfor i in range(1,5):\n tmats.append(np.load(\"average_transmat_Q{0}_no_self.npy\".format(str(i))))\n \ntmats = np.array(tmats)\n\nfor i in range(len(tmats)):\n G = nx.from_numpy_array(tmats[i], create_using=nx.MultiDiGraph())\n weights = []\n for j in 
G.edges(data=True):\n x = j[2]['weight']\n weights.append(x)\n weights = [3*((x/np.nanmax(weights))**2) for x in weights]\n plt.subplots()\n nx.draw_circular(G, with_labels = True, width = weights, edge_cmap=\"grays\")\n plt.title(\"Markov Graph of Quadrant {0}\".format(str(i+1)))\n plt.savefig(\"images/markov_graph_q{0}.png\".format(str(i+1)))\n ","sub_path":"markov_graphs.py","file_name":"markov_graphs.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"621349895","text":"# extensions\n#gitgutter\n\nclass Car:\n pass\n\nc = Car()\nprint(c, type(c))\n\n# Classes can have variables called fields\n\nclass Room:\n number = 'Rom 34'\n floor = 4\n\nr = Room()\nr1 = Room()\nprint(r.number, r1.number)\nprint(r.floor, r1.floor)\n\n# You can modify values\nr.number = 12\nr.floor = '5 floor'\nprint(r.number, r1.number)\nprint(r.floor, r1.floor)\n\n#classes can have functions inside: it's called a method\n\nclass Door:\n def open(self): # note that 'self' is the object\n print('self is', self)\n print('Door is opened!')\n self.opened = True\n\nd = Door()\nd.open()\n\n# Methods can accepts params\n\nclass Terminal:\n def hello(self, user_name):\n print('self is the objext iyself', self)\n print('Hello,', user_name)\n\nt = Terminal()\nt.hello('Nikita')\nt.hello('Vova')\n\n#classes can have both methods and fields\n\nclass Window:\n is_opened = False\n\n def open(self):\n self.is_opened = not self.is_opened\n print('Window is now', self.is_opened)\n\nw = Window()\nw1 = Window()\n\nprint('Initial state', w.is_opened, w1.is_opened)\n\nw.open()\nprint('New state', w.is_opened, w1.is_opened)\n\n# Constructor is called when new instance is create\n\nclass TestClass:\n def __init__(self):\n print('Constructor is called!')\n print('Self is the object itself!', self)\n\nt = TestClass()\nt1 = TestClass()\n\n# Constructor can have parameters\n\nclass Table:\n def __init__(self, number_of_legs):\n print('New table has {} legs'.format(number_of_legs))\n\nt = Table(4)\nt1 = Table(3)\n\n# But we need to save them into the fields!\n\nclass Chair:\n def __init__(self, color):\n self.color = color\n\nc = Chair('green')\nprint(c, c.color)\n\nc1 = Chair('red')\nprint(c1.color)\nprint('variable c did not change!', c.color)\n\n\n\nclass Calc:\n def __init__(self, number):\n self.number = number\n\n def calc_and_print(self):\n value = self.calc_value()\n self.print_number(value)\n\n def calc_value(self):\n return self.number * 10 + 2\n\n def print_number(self, value_to_print):\n print('------')\n print('Number is', value_to_print)\n print('------')\n\nclass CalcExtraValue(Calc):\n def calc_value(self):\n return self.number - 100\n\nc = Calc(3)\nc.calc_and_print()\n\nc1 = CalcExtraValue(3)\nc1.calc_and_print()","sub_path":"lesson5/OOP.py","file_name":"OOP.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"107221347","text":"from django.conf import settings\n\nfrom ip import get_geo\n\nOPEN_SERVER = getattr(settings, 'OPEN_SERVER_DOMAIN')\n\ndef get_environment(request):\n result = dict()\n result.update({'open_server': OPEN_SERVER})\n result.update({'html5': 'Mozilla/5.0' in request.META.get('HTTP_USER_AGENT', '')})\n network, name = get_geo(request)\n result.update({'intranet': network != ''})\n\n return 
result\n","sub_path":"environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"375251146","text":"\"\"\"\n@author: David Lei\n@since: 28/05/2016\n@modified: \n\n\"\"\"\n\ns = str(input())\n\nroundPar = {\"(\": \")\"}\nsquarePar = {\"[\": \"]\"}\ncurlyPar = {\"{\": \"}\"}\n\nfor _ in range(int(s)):\n par = str(input())\n l = len(par)\n if l%2 == 1:\n print(\"NO\")\n else:\n half = l//2\n t = True\n for index in range(half):\n if par[index] == \"(\":\n close = roundPar[\"(\"]\n elif par[index] == \"[\":\n close = squarePar[\"[\"]\n elif par[index] == \"{\":\n close = curlyPar[\"{\"]\n else:\n print(\"NO\")\n t = False\n break\n\n if close == par[l-index-1]:\n pass\n else:\n print(\"NO\")\n t = False\n break\n if t:\n print(\"YES\")\n\n\n\n","sub_path":"FIT2004-Algorithms-And-Data-Structures/Hackrank_Parentheses.py","file_name":"Hackrank_Parentheses.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"25288650","text":"# Family name: Hrithik Shah\n# Student number: 300069290\n# Course: IT1 1120 \n# Assignment 3 Part 1 Q3\n\ndef longest_run (u):\n \"\"\"\n list --> number\n Description: takes a list and returns the length of the longest run in it.\n Preconditions: u has to be a list\n \"\"\"\n if (len(u) == 0):\n return 0\n run_length = 1\n temp = 1\n for i in range (len(u)-1):\n if (float(u[i]) == float(u[i+1])):\n temp += 1\n if (temp > run_length):\n run_length = temp\n else:\n temp = 1\n return run_length\n\ndef main ():\n \"\"\"\n none --> none\n Description: asks for user input, calls longest_run and prints the result\n Preconditions: none\n \"\"\"\n user_input = input(\"Please input a list of numbers separated by space: \").strip().split()\n result = longest_run(user_input)\n print (result)\n\n\nmain ()\n","sub_path":"Assignments/A3_300069290/a3_Q3_300069290.py","file_name":"a3_Q3_300069290.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"102280616","text":"from .models import Quote\nimport urllib.request,json\n\nbase_url=None\n\ndef configure_request(app):\n global base_url\n base_url=app.config['QUOTE_BASE_URL']\n\ndef get_quote():\n get_quote_url=base_url.format()\n with urllib.request.urlopen(get_quote_url) as url:\n get_quote_data=url.read()\n get_quote_response=json.loads(get_quote_data)\n quote_results=None\n quote_results=get_quote_response\n quote_results=process_result(quote_results)\n\n return quote_results\n\ndef process_result(quote_list):\n quote_result=[]\n\n quote=quote_list\n author=quote_list\n\n quote_obj=Quote(quote,author)\n quote_result.append(quote_obj.quote)\n\n return quote_result\n","sub_path":"app/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"578194254","text":"import os\nimport subprocess\nfrom operator import itemgetter\nimport random\nimport numpy as np\nimport math\n\nimport datetime\nfrom pymongo import MongoClient\n\ndef hash_string(str):\n \"\"\" Same hashcode as java string hashcode s[0]*31^(n-1) + s[1]*31^(n-2) + ... 
+ s[n-1] \"\"\"\n n = len(str)-1\n hc = 0\n for c in str:\n hc = (hc + ord(c)*pow(31, n, 4294967295)) % 4294967295\n n = n - 1\n return hc\n\ndef hash_report(operation, service, tags):\n hc = 1\n # Add operation to the label\n hc = hc * pow(hash_string(\"Operation\"), hash_string(operation), 4294967295) % 4294967295\n # Add process to the label\n hc = hc * pow(hash_string(\"Service\"), hash_string(service), 4294967295) % 4294967295\n # Add tags to the lavel\n for t in tags:\n hc = hc * pow(hash_string(t), hash_string(tags[t]), 4294967295) % 4294967295\n return hc\n\ndef connect_to_mongo():\n client = MongoClient(port=27017)\n return client.traces\n\ndef return_node_vector(trace_id):\n db = connect_to_mongo()\n trace = db.uber.find(\n { \"TraceID\": trace_id }\n )\n \n node_vector = {}\n for t in trace:\n label = hash_report(t[\"Operation\"], t[\"Process\"][\"Service\"], t[\"Tags\"])\n if label not in node_vector:\n node_vector[label] = 0\n node_vector[label] += 1\n \n return node_vector\n\ndef _fast_norm(x):\n \"\"\"Compute the number of x using numba.\n\n Args:\n x - a numpy vector (or list).\n\n Returns:\n The 2-norm of x.\n \"\"\"\n s = 0.0\n for i in range(len(x)):\n s += x[i] ** 2\n return math.sqrt(s)\n\n\ndef get_distance(x, y):\n \"\"\"Compute the norm of x - y.\n\n Args:\n x - a dict.\n y - a dict.\n\n Returns:\n The 2-norm of x - y.\n \"\"\"\n vector = []\n for item in x:\n if item in y:\n vector.append(x[item] - y[item])\n else:\n vector.append(x[item])\n\n for item in y:\n if item not in x:\n vector.append(y[item])\n\n return _fast_norm(np.asarray(vector)) \n\nclass KMeansForUber():\n \n def __init__(self, trace_ids):\n input = '/home/ubuntu/sampling-tracing/src/uber_traces_distance'\n self.ids = trace_ids\n print(datetime.datetime.now())\n self.distances = self.getDistancesFromFile(self.ids, input)\n print(datetime.datetime.now())\n\n #for a in self.ids:\n # for b in self.ids:\n # print(\"<+1>\")\n # vector_a = return_node_vector(a)\n # vector_b = return_node_vector(b)\n # score_a_b = get_distance(vector_a, vector_b)\n # #score_b_a = get_distance(vector_b, vector_a)\n # #score_a_a = get_distance(vector_a, vector_a)\n # #score_b_b = get_distance(vector_b, vector_b)\n # #distance = 1 - (score_a_b * score_b_a) / (score_a_a * score_b_b)\n # distance = score_a_b\n # self.distances[(a, b)] = distance\n # self.distances[(b, a)] = distance\n\n def getDistancesFromFile(self, ids, input):\n distances = {}\n \n aux_out = open('list_ids', 'w')\n for i in ids:\n aux_out.write(i + '\\n')\n aux_out.close()\n\n ##subprocess.call(['/bin/grep', '-f', 'list_ids', input, '>', 'uber_output'])\n #grep_cmd = ['/bin/grep', '-f', 'list_ids', input]\n\n #print('grep file')\n #with open('uber_output', \"w\") as outfile:\n # subprocess.call(grep_cmd, stdout=outfile)\n #print('end grep')\n\n f = open('uber_output', 'r')\n for line in f:\n token = line.split(\"\\t\")\n trace_a = token[0]\n trace_b = token[1]\n distance = float(token[2])\n\n if trace_a in ids and trace_b in ids:\n #print(trace_a + \" - \" + trace_b + \" = \" + str(distance))\n distances[(trace_a, trace_b)] = distance\n distances[(trace_b, trace_a)] = distance\n \n #print(distances)\n return distances\n\n \n def multi_kmeans(self, num_clusters, repetitions=100, max_iterations_per_kmeans=100):\n best_score = float(\"inf\")\n best_clustering = [set(self.ids)]\n for i in range(repetitions):\n next_clusters = self.kmeans(num_clusters, max_iterations_per_kmeans)\n next_score = 0\n for cluster in next_clusters:\n next_score = next_score + 
self.cluster_score(cluster)\n if next_score < best_score:\n print(\"New best! %f -> %f\" % (best_score, next_score))\n best_score = next_score\n best_clustering = next_clusters\n print(\"Best clustering has score %f\" % best_score)\n return best_clustering\n \n \n def kmeans(self, num_clusters, max_iterations=100):\n clusters = [set() for i in range(num_clusters)] \n for i in range(num_clusters):\n clusters[i].add(self.ids[random.randint(0,len(self.ids)-1)])\n i = 0\n converged = False\n while i < max_iterations and not converged:\n newclusters = [set() for x in range(num_clusters)]\n for id in self.ids:\n closest = 0\n distance_to_closest = float(\"inf\")\n for j in range(num_clusters):\n distance = self.distance_to_cluster(id, clusters[j])\n if distance < distance_to_closest:\n closest = j\n distance_to_closest = distance\n newclusters[closest].add(id)\n converged = clusters==newclusters\n i = i + 1\n clusters = newclusters\n return clusters\n \n def distance_to_cluster(self, id, cluster):\n distance = 0\n for member in cluster:\n distance = distance + self.distances[(id, member)]\n if len(cluster) == 0:\n return distance\n return distance / len(cluster)\n \n def cluster_score(self, cluster):\n elems = list(cluster)\n if len(elems)<2:\n return 0.0\n sum = 0.0\n norm = 0.0\n for i in range(len(elems)):\n for j in range(i):\n sum = sum + self.distances[(elems[i], elems[j])]\n #norm = norm + 1.0\n return sum #/ norm\n","sub_path":"src/sampling/graph/KMeansForUberTraces.py","file_name":"KMeansForUberTraces.py","file_ext":"py","file_size_in_byte":6320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"466403508","text":"import os\nfrom flask import Blueprint\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import request\nfrom flask import send_from_directory\nfrom flask import redirect\nfrom flask import url_for, jsonify\nimport json\nfrom app.views.user import user\nfrom app.base.mapTable import mapUser\nfrom app.base.loggerNoSQL import loggerNoSQL\n\ndataUser = Blueprint('dataUser', __name__)\n\n@dataUser.route('/authenticateUser', methods=['POST'])\ndef authenticateUser():\n rec = json.loads(request.get_data())\n\n userx = rec['EMAIL']\n password = rec['PASSWORD']\n idUser = rec['idUser']\n\n try:\n user1 = user(None, idUser)\n backData = user1.testAuthenticate(userx, password)\n del user1\n\n return backData\n except Exception as ex:\n return jsonify({ \"message\": ex.args[0] }), 500\n\n@dataUser.route('/listUsers', methods=['POST'])\ndef listUsers():\n rec = json.loads(request.get_data())\n\n nome = rec['nome']\n email = rec['email']\n\n user1 = user(rec['keep'])\n\n result = user1.testListOfUsers(nome, email)\n\n del user1\n\n return result\n\n@dataUser.route('/getUser', methods=['POST'])\ndef getUser():\n\n rec = json.loads(request.get_data())\n\n ID_USER = rec['ID_USER']\n keep = rec['keep']\n\n user1 = user(keep)\n result = user1.getUser(ID_USER)\n\n del user1\n\n return result\n\n@dataUser.route('/saveUser', methods=['POST'])\ndef saveUser():\n rec = json.loads(request.get_data())\n\n ID_USER = rec['ID_USER']\n NAME_USER = rec['NAME_USER']\n EMAIL = rec['EMAIL']\n PASSWORD_USER = rec['PASSWORD_USER']\n KIND_OF_USER = rec['KIND_OF_USER']\n USER_ENABLED = rec['USER_ENABLED']\n keep = rec['keep']\n idUser = rec['idUser']\n\n table = mapUser(ID_USER, NAME_USER, PASSWORD_USER, EMAIL, USER_ENABLED, KIND_OF_USER)\n\n user1 = user(keep, idUser)\n retorno = user1.testSaveUser(table)\n\n del user1\n\n return 
retorno\n\n@dataUser.route('/deleteUser', methods=['DELETE'])\ndef deleteUser():\n rec = json.loads(request.get_data())\n\n ID_USER = rec['ID_USER']\n keep = rec['keep']\n\n user1 = user(keep)\n user1.deleteUser(ID_USER)\n\n del user1\n\n return \"Ok\"\n\n@dataUser.route('/listOfLogs', methods=['POST'])\ndef listOfLogs():\n rec = json.loads(request.get_data())\n\n keep = rec['keep']\n idUser = int(rec['idUser'])\n data1 = rec['data']\n start = int(rec['start'])\n limit = int(rec['limit'])\n\n log = loggerNoSQL()\n\n result = log.testListLogs(data1, start, limit)\n\n return result","sub_path":"app/views/routeUser.py","file_name":"routeUser.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"546771097","text":"from flask import Blueprint\nfrom flask import flash\nfrom flask import g\nfrom flask import redirect\nfrom flask import render_template\nfrom flask import request\nfrom flask import url_for\nfrom flask import jsonify\nfrom cms import db\nfrom flask import session\nfrom flask_login import current_user\nfrom datetime import date\nfrom cms.models.MyTask import MyTask\nfrom werkzeug.exceptions import abort\n\nbp = Blueprint(\"message\", __name__)\nUsers=current_user\n@bp.route(\"/message\")\ndef inbox():\n myTask=MyTask.query.all()\n\n return render_template(\"components/showMyTask.html\",myTask=myTask)\n\n@bp.route(\"/message/compose\", methods=(\"GET\", \"POST\"))\ndef compose():\n if request.method == \"POST\":\n hascontent = MyTask.query.filter_by(id=id).first()\n if hascontent is None:\n return redirect(url_for('content.show'))\n taskTitle = request.form['taskTitle']\n instruction = request.form['instruction']\n dueDate = request.form['dueDate']\n volume = request.form['volume']\n number = request.form['number']\n assignedTo = request.form['assignedTo']\n assigneeID = request.form['assigneeID']\n status = request.form['status']\n hascontent.taskTitle=taskTitle\n hascontent.instruction=instruction\n hascontent.dueDate=dueDate\n hascontent.volume=volume\n hascontent.number=number\n hascontent.assignedTo=assignedTo\n hascontent.assigneeID=assigneeID\n hascontent.status=status\n db.session.add(hascontent)\n db.session.commit()\n flash(\"Content Updated Succesfully\")\n return redirect(url_for('myTask.show'))\n\n else:\n hascontent = MyTask.query.filter_by(id=id).first()\n return render_template(\"components/editMyTask.html\",contents=hascontent)\n\n@bp.route(\"/message/delete/\", methods=(\"GET\", \"POST\"))\ndef delete(id):\n myTask=MyTask.query.get_or_404(id)\n db.session.delete(myTask)\n db.session.commit()\n flash(\"Content Deleted Succesfully\")\n return redirect(url_for('myTask.show'))\n\n@bp.route(\"/message/create\", methods=(\"GET\", \"POST\"))\ndef create():\n if request.method == \"POST\":\n taskTitle = request.form['taskTitle']\n instruction = request.form['instruction']\n dueDate = request.form['dueDate']\n volume = request.form['volume']\n number = request.form['number']\n assignedTo = request.form['assignedTo']\n assigneeID = request.form['assigneeID']\n status = request.form['status']\n myTask = MyTask(taskTitle, instruction, dueDate,volume,number,assignedTo,assigneeID,status)\n db.session.add(myTask)\n db.session.commit()\n flash(\"Content Created Succesfully\")\n return redirect(url_for('myTask.show'))\n else:\n return 
render_template(\"components/createmyTask.html\")","sub_path":"cms/route/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"177341904","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n###############################################################################\n# Copyright Kitware Inc.\n#\n# Licensed under the Apache License, Version 2.0 ( the \"License\" );\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n###############################################################################\n\nimport bson.json_util\n\nfrom . import features\n\nfrom girder import events\nfrom girder.constants import AccessType\nfrom girder.utility.model_importer import ModelImporter\nfrom girder.api.describe import Description\nfrom girder.api.rest import Resource, RestException\n\n\nclass GeoJSON(Resource):\n\n def __init__(self):\n self.resourceName = 'geojson'\n\n self.route('GET', ('points',), self.points)\n\n def points(self, params):\n self.requireParams(('q',), params)\n limit, offset, sort = self.getPagingParameters(params, 'name')\n latitude = params.get('latitude', 'meta.latitude')\n longitude = params.get('longitude', 'meta.longitude')\n\n spec = {\n 'type': 'point',\n 'latitude': latitude,\n 'longitude': longitude,\n 'keys': ['meta', 'name', 'description', '_id'],\n 'flatten': ['meta']\n }\n\n try:\n query = bson.json_util.loads(params['q'])\n except ValueError: # pragma: no cover\n raise RestException('The query parameter must be a JSON object.')\n\n events.trigger('geojson.points', info={\n 'spec': spec,\n 'query': query\n })\n\n # make sure the lat/lon are whitelisted keys to prevent private\n # data leaking\n if spec['latitude'].split('.')[0] not in spec['keys'] or \\\n spec['longitude'].split('.')[0] not in spec['keys']:\n raise RestException('Invalid latitude/longitude key.', code=402)\n\n coll = features.FeatureCollection(points=spec)\n\n item = ModelImporter().model('item')\n cursor = item.find(\n query,\n limit=0\n )\n\n cursor = item.filterResultsByPermission(\n cursor,\n user=self.getCurrentUser(),\n level=AccessType.READ,\n limit=limit,\n offset=offset\n )\n\n try:\n obj = coll(points=cursor)\n except features.GeoJSONException:\n raise RestException(\n 'Could not assemble a geoJSON object from spec.',\n code=401\n )\n\n return obj\n\n points.description = (\n Description(\n 'Returns an item query as a geoJSON point feature collection.'\n )\n .param('q', 'The search query as a JSON object.')\n .param(\n 'longitude',\n 'The location of the longitude in the object ' +\n '(default=\"meta.longitude\").',\n required=False\n )\n .param(\n 'latitude',\n 'The location of the latitude in the object ' +\n '(default=\"meta.latitude\").',\n required=False\n )\n .param(\n 'limit',\n 'Result set size limit (default=50).',\n required=False,\n dataType='int'\n )\n .param(\n 'offset',\n 'Offset into result set (default=0).',\n required=False,\n dataType='int'\n )\n .errorResponse()\n .errorResponse('Could not assemble geoJSON object.', 
401)\n .errorResponse('Invalid latitude/longitude key.', 402)\n )\n\n\ndef load(info):\n info['apiRoot'].geojson = GeoJSON()\n","sub_path":"plugins/geoJSON/server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"8329499","text":"\"\"\"\nDiff command.\n\nCompares metrics between uncommitted files and indexed files.\n\"\"\"\nimport multiprocessing\nimport os\nimport tabulate\n\nfrom pathlib import Path\nfrom wily import logger, format_revision, format_date\nfrom wily.archivers import resolve_archiver\nfrom wily.config import DEFAULT_GRID_STYLE, DEFAULT_PATH\nfrom wily.operators import (\n resolve_metric,\n resolve_operator,\n get_metric,\n GOOD_COLORS,\n BAD_COLORS,\n OperatorLevel,\n)\nfrom wily.commands.build import run_operator\nfrom wily.state import State\n\nimport radon.cli.harvest\n\n\ndef diff(config, files, metrics, changes_only=True, detail=True, revision=None):\n \"\"\"\n Show the differences in metrics for each of the files.\n\n :param config: The wily configuration\n :type config: :namedtuple:`wily.config.WilyConfig`\n\n :param files: The files to compare.\n :type files: ``list`` of ``str``\n\n :param metrics: The metrics to measure.\n :type metrics: ``list`` of ``str``\n\n :param changes_only: Only include changes files in output.\n :type changes_only: ``bool``\n\n :param detail: Show details (function-level)\n :type detail: ``bool``\n\n :param revision: Compare with specific revision\n :type revision: ``str``\n \"\"\"\n config.targets = files\n files = list(files)\n state = State(config)\n\n # Resolve target paths when the cli has specified --path\n if config.path != DEFAULT_PATH:\n targets = [str(Path(config.path) / Path(file)) for file in files]\n else:\n targets = files\n\n # Expand directories to paths\n files = [\n os.path.relpath(fn, config.path)\n for fn in radon.cli.harvest.iter_filenames(targets)\n ]\n logger.debug(f\"Targeting - {files}\")\n\n if not revision:\n target_revision = state.index[state.default_archiver].last_revision\n else:\n rev = resolve_archiver(state.default_archiver).cls(config).find(revision)\n logger.debug(f\"Resolved {revision} to {rev.key} ({rev.message})\")\n try:\n target_revision = state.index[state.default_archiver][rev.key]\n except KeyError:\n logger.error(\n f\"Revision {revision} is not in the cache, make sure you have run wily build.\"\n )\n exit(1)\n\n logger.info(\n f\"Comparing current with {format_revision(target_revision.revision.key)} by {target_revision.revision.author_name} on {format_date(target_revision.revision.date)}.\"\n )\n\n # Convert the list of metrics to a list of metric instances\n operators = {resolve_operator(metric.split(\".\")[0]) for metric in metrics}\n metrics = [(metric.split(\".\")[0], resolve_metric(metric)) for metric in metrics]\n results = []\n\n # Build a set of operators\n with multiprocessing.Pool(processes=len(operators)) as pool:\n operator_exec_out = pool.starmap(\n run_operator, [(operator, None, config, targets) for operator in operators]\n )\n data = {}\n for operator_name, result in operator_exec_out:\n data[operator_name] = result\n\n # Write a summary table\n extra = []\n for operator, metric in metrics:\n if detail and resolve_operator(operator).level == OperatorLevel.Object:\n for file in files:\n try:\n extra.extend(\n [\n f\"{file}:{k}\"\n for k in data[operator][file][\"detailed\"].keys()\n if k != metric.name\n and 
isinstance(data[operator][file][\"detailed\"][k], dict)\n ]\n )\n except KeyError:\n logger.debug(f\"File {file} not in cache\")\n logger.debug(\"Cache follows -- \")\n logger.debug(data[operator])\n files.extend(extra)\n logger.debug(files)\n for file in files:\n metrics_data = []\n has_changes = False\n for operator, metric in metrics:\n try:\n current = target_revision.get(\n config, state.default_archiver, operator, file, metric.name\n )\n except KeyError:\n current = \"-\"\n try:\n new = get_metric(data, operator, file, metric.name)\n except KeyError:\n new = \"-\"\n if new != current:\n has_changes = True\n if metric.type in (int, float) and new != \"-\" and current != \"-\":\n if current > new:\n metrics_data.append(\n \"{0:n} -> \\u001b[{2}m{1:n}\\u001b[0m\".format(\n current, new, BAD_COLORS[metric.measure]\n )\n )\n elif current < new:\n metrics_data.append(\n \"{0:n} -> \\u001b[{2}m{1:n}\\u001b[0m\".format(\n current, new, GOOD_COLORS[metric.measure]\n )\n )\n else:\n metrics_data.append(\"{0:n} -> {1:n}\".format(current, new))\n else:\n if current == \"-\" and new == \"-\":\n metrics_data.append(\"-\")\n else:\n metrics_data.append(\"{0} -> {1}\".format(current, new))\n if has_changes or not changes_only:\n results.append((file, *metrics_data))\n else:\n logger.debug(metrics_data)\n\n descriptions = [metric.description for operator, metric in metrics]\n headers = (\"File\", *descriptions)\n if len(results) > 0:\n print(\n # But it still makes more sense to show the newest at the top, so reverse again\n tabulate.tabulate(\n headers=headers, tabular_data=results, tablefmt=DEFAULT_GRID_STYLE\n )\n )\n","sub_path":"src/wily/commands/diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":5837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"390009258","text":"# @author Aaron Moran \n#\n# Binary Trees - A binary tree has 1 parent and 2 children the next child is regarded as a sibling.\n# Parent Node is the centre/head of the Tree. Regardless of its value. 
Larger Children Node values do not override the Parent.\n# To be a Tree A Graph must satisfy two requirements -Acylic- and -Connected- \n#\n\nclass Node:\n\n def __init__(self, data):\n\n # left && right notes initialised to insert into tree\n self.left = None\n self.right = None\n self.data = data\n\n # inserting into the tree\n # data which is GREATER than PARENT will be inserted into the right\n # data which is LESS than PARENT will be insted into the left\n def insert(self, data):\n # Compare the new value with the parent node\n if self.data:\n # if less than Parent Node go LEFT\n if data < self.data:\n if self.left is None:\n self.left = Node(data)\n else:\n self.left.insert(data)\n # if greater than Parent Node go RIGHT\n elif data > self.data:\n if self.right is None:\n self.right = Node(data)\n else:\n self.right.insert(data)\n # else if insert(10) and parent(10) insert not entered or duplicated\n else:\n self.data = data\n\n # display the current tree\n def PrintTree(self):\n if self.left:\n self.left.PrintTree()\n print( self.data),\n if self.right:\n self.right.PrintTree()\n\n\n# User prompt\nuser_input = int(input(\"Enter the Parent Node value : \"))\n# Parent Node\nroot = Node(user_input)\n# inserting into Nodes as child nodes, then to siblings\nroot.insert(6)\nroot.insert(14)\nroot.insert(3)\nroot.insert(10)\nroot.insert(19)\nroot.insert(8)\n# printing and displaying the tree\nprint(\"==========BINARY-TREE==========\")\nprint(\"Parent is :\", user_input)\nprint(\"-------------------------------\")\nroot.PrintTree()","sub_path":"scripts/Trees.py","file_name":"Trees.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"138954239","text":"import sys\nimport math\nimport numpy as np\nfrom sklearn.datasets import load_digits\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.preprocessing import scale\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import RandomForestClassifier\n\n\ndef report_accuracy(predicted, actual):\n correct = 0\n for i in range(len(predicted)):\n if predicted[i] == actual[i]:\n correct += 1\n percentage = round(correct / len(predicted), 2) * 100\n print(\"Predicting targets at {}% accuracy\".format(percentage))\n\nclass ds(object):\n pass\n\n\ndef read_haberman():\n raw_data = np.genfromtxt(\"haberman.data.txt\", dtype=str, delimiter=',')\n\n dts = ds()\n dts.data = raw_data[:, :len(raw_data[0]) - 1].astype(np.float)\n dts.target = raw_data[:, len(raw_data[0]) - 1:].astype(np.float).flatten()\n return dts\n\n\ndef main(argv):\n # dataset = load_digits()\n # dataset = load_breast_cancer()\n dataset = read_haberman()\n\n dataset.data = scale(dataset.data)\n rows = len(dataset.data)\n test_items = math.floor(rows * 0.3)\n indices = np.random.permutation(rows)\n training_data = dataset.data[indices[:-test_items]]\n training_targets = dataset.target[indices[:-test_items]]\n test_data = dataset.data[indices[-test_items:]]\n test_targets = dataset.target[indices[-test_items:]]\n\n max_bag_samples = 0.7\n max_bag_features = 0.7\n\n # kNN\n knn_clf = KNeighborsClassifier()\n knn_clf.fit(training_data, training_targets)\n print(\"kNN accuracy:\")\n report_accuracy(knn_clf.predict(test_data), test_targets)\n\n 
bagging_knn = BaggingClassifier(KNeighborsClassifier(), max_samples=max_bag_samples, max_features=max_bag_features)\n bagging_knn.fit(training_data, training_targets)\n print(\"kNN bagged accuracy:\")\n report_accuracy(bagging_knn.predict(test_data), test_targets)\n print()\n\n # SVM\n svm_clf = SVC()\n svm_clf.fit(training_data, training_targets)\n print(\"SVM accuracy:\")\n report_accuracy(svm_clf.predict(test_data), test_targets)\n\n bagging_svm = BaggingClassifier(SVC(), max_samples=max_bag_samples, max_features=max_bag_features)\n bagging_svm.fit(training_data, training_targets)\n print(\"kNN bagged accuracy:\")\n report_accuracy(bagging_svm.predict(test_data), test_targets)\n print()\n\n # Naive Bayes\n nb_clf = GaussianNB()\n nb_clf.fit(training_data, training_targets)\n print(\"Naive Bayes accuracy:\")\n report_accuracy(nb_clf.predict(test_data), test_targets)\n\n bagging_nb = BaggingClassifier(GaussianNB(), max_samples=max_bag_samples, max_features=max_bag_features)\n bagging_nb.fit(training_data, training_targets)\n print(\"kNN bagged accuracy:\")\n report_accuracy(bagging_nb.predict(test_data), test_targets)\n print()\n\n # AdaBoost\n boost_clf = AdaBoostClassifier(n_estimators=200)\n scores = cross_val_score(boost_clf, dataset.data, dataset.target)\n print(\"AdaBoost accuracy:\")\n print(scores.mean())\n print()\n\n # Random Forest\n forest_clf = RandomForestClassifier(n_estimators=20)\n scores = cross_val_score(forest_clf, dataset.data, dataset.target)\n print(\"Random Forest accuracy:\")\n print(scores.mean())\n\n return 0\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"Ensemble/Experiment.py","file_name":"Experiment.py","file_ext":"py","file_size_in_byte":3477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"88534868","text":"'''\r\n programsko rješenje za 2. laboratorijsku vježbu na kolegiju \"Uvod u teoriju računarstva\"\r\n autor: Mihael Miličević\r\n'''\r\n\r\n# osnovna klasa za modeliranje DKA automata\r\nclass DKA():\r\n\r\n # konstrutor klase, kao argumente prima 5 varijabli koje predstavljaju uređenu petorku kojom se DKA formalno definira\r\n def __init__(self, states, symbols, acceptableStates, startState, transitionFunction):\r\n self.states = states\r\n self.symbols = symbols\r\n self.acceptableStates = acceptableStates\r\n self.startState = startState\r\n self.transitionFunction = {}\r\n for el in transitionFunction:\r\n self.transitionFunction[(el[0],el[1])] = el[2]\r\n\r\n # glavna funkcija minimizacije automata, prvo poziva funkciju uklanjanja nedohvatljivih stanja, a potom poziva funkciju spajanja istovjetnih stanja\r\n def minimize(self):\r\n self.dropUnreachableStates()\r\n self.mergeNondistinguishableStates()\r\n\r\n # pomoćna funkcija čija svrha je pronalazak i uklanjanje nedohvatljivih stanja automata\r\n def dropUnreachableStates(self):\r\n\r\n # kretanje iz početnog stanja automata i korištenje BFS-a za identifikaciju svih dohvatljivih stanja\r\n reachedStates = set()\r\n queue = [self.startState]\r\n while len(queue) != 0:\r\n curr = queue.pop(0)\r\n\r\n # računanje svih stanja u koje automat može ići iz trenutnog stanja\r\n transitions = [transition for transition in self.transitionFunction if transition[0] == curr]\r\n for transition in transitions:\r\n\r\n # svrha ove provjere je da se ne zavrtimo u beskonačnoj petlji, tj. 
da ne obilazimo već obiđena stanja\r\n if transition[0] not in reachedStates:\r\n queue.append(self.transitionFunction[transition])\r\n reachedStates.add(curr)\r\n\r\n # računanje svih nedohvatljivih stanja\r\n self.states = set(self.states)\r\n self.acceptableStates = set(self.acceptableStates)\r\n unreachableStates = self.states - reachedStates\r\n\r\n #uklanjanje nedohvatljivih stanja iz automata\r\n for state in unreachableStates:\r\n\r\n # uklanjanje nedohvatljivog stanja iz liste svih stanja automata\r\n self.states.remove(state)\r\n\r\n # uklanjanje nedohvatljivog stanja iz liste svih prihvatljivih stanja automata\r\n if state in self.acceptableStates:\r\n self.acceptableStates.remove(state)\r\n\r\n # uklanjanje nedohvatljivog stanja iz definicije funkcije prijelaza automata\r\n transitions = [transition for transition in self.transitionFunction if transition[0] == state]\r\n for transition in transitions:\r\n self.transitionFunction.pop(transition)\r\n \r\n # pretvorba skupa stanja nazad u listu, i sortiranje liste\r\n self.states = list(self.states)\r\n self.states.sort()\r\n\r\n # pretvordba skupa prihvatljivih stanja nazad u listu, i sortiranje liste\r\n self.acceptableStates = list(self.acceptableStates)\r\n self.acceptableStates.sort()\r\n\r\n # pomoćna funkcija čija svrha je pronalazak svih istovjetnih stanja, i spajanje takvih stanja u jedno\r\n def mergeNondistinguishableStates(self):\r\n\r\n # korišteni algoritam je Algoritam 3 iz udžbenika(str. 25.-27.), pa je prvi korak iniciranje tablice\r\n table = {}\r\n propagationTable = {}\r\n for i in range(1, len(self.states)):\r\n for j in range(i):\r\n table[(self.states[i],self.states[j])] = 0\r\n propagationTable[(self.states[i],self.states[j])] = []\r\n\r\n # svrha varijable iter je iteriranje po svih parovima stanja\r\n iter = list(table)\r\n iter.sort()\r\n\r\n # pronalazak parova u kojima članovi nemaju istu prihvatljivost, i označavanje takvih parova u tablici kao 1\r\n for el in iter:\r\n flag1 = False\r\n flag2 = False\r\n if el[0] in self.acceptableStates:\r\n flag1 = True\r\n if el[1] in self.acceptableStates:\r\n flag2 = True\r\n if flag1 != flag2:\r\n table[el] = 1\r\n\r\n # prolazak kroz sve elemente tablice sa svrhom identifikacije različitih stanja\r\n for el in iter:\r\n\r\n # ukoliko smo došli na par stanja koji je označen kao 1 tj. 
stanja su različita, nemamo što provjeravati, idemo na sljedeći korak\r\n if table[el] == 1:\r\n continue\r\n\r\n # računanje stanja prijelaza za prvi i drugi element para stanja koji trenutno obrađujemo\r\n states1 = [self.transitionFunction[transition] for transition in self.transitionFunction if transition[0] == el[0]]\r\n states2 = [self.transitionFunction[transition] for transition in self.transitionFunction if transition[0] == el[1]]\r\n\r\n # provjera istovjetnosti stanja za svaki par prijelaza\r\n flag = False\r\n for i in range(len(states1)):\r\n state1 = states1[i]\r\n state2 = states2[i]\r\n\r\n # izvođenje algoritma ovisi o tome da je leksikografski veće stanje na prvom mjestu, pa ovdje vršimo zamjenu ako to nije slučaj\r\n if state1 < state2:\r\n state1,state2 = state2,state1\r\n\r\n # provjera različitosti stanja je nužna jer postoji mogućnost da oba stanja za neki znak prelaze u isto stanje, i tada nemamo što provjeravati\r\n if state1 != state2:\r\n\r\n # ukoliko stanja koja provjeravamo za neki ulazni znak prelaze u stanja za koja znamo da su različita, nužno su i stanja koja provjeravamo različita\r\n if table[(state1,state2)] == 1:\r\n flag = True\r\n\r\n # spremamo stanja koja trenutno provjeravamo u listu koja pripada stanjima u koja naša trenutna stanja prelaze, kako bismo mogli promijeniti trenutno provjeravana stanja ako se stanja u koja trenutno prelazimo u nekom trenutku identificiraju kao različita\r\n else:\r\n propagationTable[(state1,state2)].append(el)\r\n \r\n # zastavica označava da smo trenutni par stanja označili kao različita, i sad je potrebno rekurzivno sve parove koji ovise o trenutnom paru također označiti kao različite\r\n if flag:\r\n\r\n # koristimo BFS, pa nam stoga treba red\r\n queue = [el]\r\n while len(queue) != 0:\r\n state = queue.pop(0)\r\n for propagation in propagationTable[state]:\r\n if table[propagation] == 0:\r\n queue.append(propagation)\r\n table[state] = 1\r\n \r\n # istovjetna stanja prepoznajemo po tome što je vrijednost njihovog para u tablici jednaka nuli\r\n undistinguishableStates = [el for el in table if table[el] == 0]\r\n\r\n # u ovu listu ćemo spremati sve skupove stanja koja treba združiti\r\n mergedStates = []\r\n\r\n # obrađujemo jedan po jedan par istovjetnih stanja, i stoga izvodimo petlju dok god ih ne obradimo sve\r\n while len(undistinguishableStates) != 0:\r\n \r\n # stvaramo novi skup u koji ćemo na temelju svojstva tranzitivnosti relacije istovjetnosti dodavati sva istovjetna stanja\r\n newMerge = set()\r\n\r\n # prvo stanje uzimamo kao prvi element liste parova istovjetnih stanja\r\n state = undistinguishableStates.pop(0)\r\n\r\n # dodavanje prva dva stanja u skup svih istovjetnih stanja\r\n newMerge.add(state[0])\r\n newMerge.add(state[1])\r\n\r\n # izvodit ćemo sljedeću petlju dok god ne budemo u ostalim istovjetnim stanjima mogli pronaći ijedan par koji odgovara elementima trenutnog skupa\r\n flag = True\r\n while flag:\r\n\r\n # u varijablu indexes ćemo spremati indekse onih parova koje treba ubaciti u trenutni skup\r\n indexes = []\r\n\r\n # za svaki par istovjetnih stanja koje dosad nismo spojili u neki u skup provjeravamo imaju li zajednički element s trenutnim skupom\r\n for i in range(len(undistinguishableStates)):\r\n if undistinguishableStates[i][0] in newMerge or undistinguishableStates[i][1] in newMerge:\r\n indexes.append(i)\r\n\r\n # ukoliko u listi indexes nema ništa, više ne možemo ništa spariti i prekidamo petlju\r\n if len(indexes) == 0:\r\n flag = False\r\n\r\n # inače smo pronašli barem 
jedan par stanja koji ćemo spojiti s trenutnim skupom\r\n else:\r\n\r\n # obrtanje indeksa radi potencijalnih nuspojava pri brisanju odgovarajućih parova iz liste istovjetnih stanja\r\n indexes = indexes[::-1]\r\n\r\n # dodavanje oba elementa para u skup\r\n for index in indexes:\r\n newMerge.add(undistinguishableStates[index][0])\r\n newMerge.add(undistinguishableStates[index][1])\r\n\r\n # brisanje para iz liste istovjetnih stanja\r\n for index in indexes:\r\n undistinguishableStates.pop(index)\r\n\r\n # u varijabli mergedStates čuvamo sve skupove stanja, i sada u nju dodajemo novi skup\r\n mergedStates.append(newMerge)\r\n\r\n # pretvaranje listi stanja i prihvatljivih stanja u skupove radi lakšeg brisanja i dodavanja elemenata\r\n self.acceptableStates = set(self.acceptableStates)\r\n self.states = set(self.states)\r\n \r\n # petlju ponavljamo za svako stanje u listu skupova mergedStates\r\n for state in mergedStates:\r\n\r\n # pretvaranje skupa u listu\r\n l = list(state)\r\n\r\n # sortiranje liste i uzimanje leksikografski najmanjeg člana kao oznaku ovog stanja, sva ostala ekvivalentna stanja ćemo zamijeniti s ovim stanjem\r\n l.sort()\r\n newState = l[0]\r\n\r\n # svaki element tog skupa koji nije početni tj. leksikografski najmanji brišemo i na prikladan način mijenjamo u definiciji automata\r\n for el in l[1:]:\r\n\r\n # ukoliko je to stanje u skupu prihvatljivih stanja, mijenjamo ga s novom oznakom tog zajedničkog stanja\r\n if el in self.acceptableStates:\r\n self.acceptableStates.remove(el)\r\n self.acceptableStates.add(newState)\r\n\r\n # mičemo stanje iz skupa svih stanja automata\r\n self.states.remove(el)\r\n\r\n # ukoliko je to stanje početno stanje, mijenjamo ga s novom oznakom tog zajedničkog stanja\r\n if self.startState == el:\r\n self.startState = newState\r\n\r\n # brisanje stanja iz funkcije prijelaza\r\n for k in self.transitionFunction:\r\n\r\n # za svaki prijelaz koji završava u tom stanju, to stanje mijenjamo novom oznakom zajedničkog stanja\r\n if self.transitionFunction[k] == el:\r\n self.transitionFunction[k] = newState\r\n\r\n # u varijabli keys nalaze se svi parovi prijelaza za stanje koje brišemo\r\n keys = [k for k in self.transitionFunction if k[0] == el]\r\n \r\n # brisanje prijelaza stanja kojeg brišemo iz funkcije prijelaza\r\n for key in keys:\r\n self.transitionFunction.pop(key)\r\n\r\n # pretvorba skupa stanja nazad u listu, i sortiranje liste\r\n self.states = list(self.states)\r\n self.states.sort()\r\n\r\n # pretvorba skupa prihvatljivih stanja nazad u listu, i sortiranje liste\r\n self.acceptableStates = list(self.acceptableStates)\r\n self.acceptableStates.sort()\r\n\r\n # funkcija generira znakovni niz koji odgovara traženoj definiciji automata \r\n def definition(self):\r\n\r\n # u varijablu retVal ćemo spremiti definiciju automata\r\n retVal = \"\"\r\n\r\n # računanje pravilnog ispisa svih stanja automata\r\n states = \"\"\r\n self.states.sort()\r\n for state in self.states:\r\n states += state + \",\"\r\n states = states[:len(states)-1]\r\n retVal += states + \"\\n\"\r\n\r\n # računanje pravilnog ispisa svih znakove abecede automata\r\n symbols = \"\"\r\n self.symbols.sort()\r\n for symbol in self.symbols:\r\n symbols += symbol + \",\"\r\n symbols = symbols[:len(symbols)-1]\r\n retVal += symbols + \"\\n\"\r\n\r\n # računanje pravilnog ispisa svih prihvatljivih stanja automata\r\n acceptableStates = \"\"\r\n self.acceptableStates.sort()\r\n for state in self.acceptableStates:\r\n acceptableStates += state + \",\"\r\n acceptableStates = 
acceptableStates[:len(acceptableStates)-1]\r\n retVal += acceptableStates + \"\\n\"\r\n\r\n # računanje pravilnog ispisa početnog stanja automata\r\n retVal += self.startState + \"\\n\"\r\n\r\n # računanje pravilnog ispisa funkcije prijelaza automata\r\n transitions = []\r\n for el in self.transitionFunction: \r\n transition = el[0] + \",\" + el[1] + \"->\" + self.transitionFunction[(el[0],el[1])]\r\n transitions.append(transition)\r\n transitions.sort()\r\n for transition in transitions:\r\n retVal += transition + \"\\n\"\r\n\r\n # vraćanje znakovnog niza koji odgovora definiciji automata\r\n return retVal\r\n\r\n# glavna funkcija u programu koja čita ulaz, parsira ga, inicijalizira objekt tipa DKA, minimizira taj automat, i potom vraća njegovu definiciju\r\ndef main():\r\n\r\n # u varijablu lines spremamo pojedine redove u ulazu\r\n lines = []\r\n try:\r\n while True:\r\n i = input()\r\n lines.append(i)\r\n except EOFError:\r\n pass\r\n\r\n # čitanje svih stanja automata\r\n states = lines[0].split(\",\")\r\n\r\n # čitanje svih simbola koji se mogu naći na ulazu automata\r\n symbols = lines[1].split(\",\")\r\n\r\n # čitanje svih prihvatljivih stanja automata, uzevši u obzir da je moguće da je taj redak u ulazu prazan\r\n if len(lines[2]) != 0:\r\n acceptableStates = lines[2].split(\",\")\r\n else:\r\n acceptableStates = []\r\n\r\n # čitanje početnog stanja automata\r\n startState = lines[3]\r\n\r\n # ostale linije u ulazu označavaju funkciju prijelaza\r\n transitionFunctionInput = lines[4:]\r\n\r\n # čitanje svakog retka koji označava funkciju prijelaza, te potom parsiranje i dodavanje u varijablu funkcije prijelaza\r\n transitionFunction = []\r\n for el in transitionFunctionInput:\r\n el = el.split(\"->\")\r\n start = el[0].split(\",\")[0]\r\n symbol = el[0].split(\",\")[1]\r\n finish = el[1].split(\",\")[0]\r\n transitionFunction.append((start,symbol,finish))\r\n\r\n # inicijaliziranje varijable tipa DKA\r\n automaton = DKA(states,symbols,acceptableStates,startState,transitionFunction)\r\n\r\n # pozivanje funkcije minimiziranja DKA\r\n automaton.minimize()\r\n\r\n # vraćanje definicije minimiziranog automata\r\n return automaton.definition()\r\n\r\n# pokretanje glavne funkcije i ispis rezultata\r\nprint(main())","sub_path":"MinDka.py","file_name":"MinDka.py","file_ext":"py","file_size_in_byte":15359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"537878974","text":"# coding:utf-8\nfrom PIL import Image, ImageDraw\n\n\nclass ClearNoise:\n\n # 判断噪点\n # 将灰度值与周围灰度值相似度较小的点认为是噪点\n def clearNoise(self, img, radius, m, func):\n pix = img.load()\n w, h = img.size\n w -= 1\n h -= 1\n\n for i in range(0, w):\n for j in range(0, h):\n sim = -1\n # 认为这个点是噪点\n for k in range(i - radius, i + radius):\n if k < 1 or k > w:\n continue\n for q in range(j - radius, j + radius):\n if q <= 1 or q > h:\n continue\n if abs(pix[k, q] - pix[i, j]) < 40:\n sim += 1\n # 该点与周围点相似度较小,认为是噪点\n if sim < m:\n tmp = func(pix, i, j, radius, w, h)\n pix[i, j] = tmp\n\n # 平均值去噪\n # 用噪点领域的内的平均值代替噪点\n def ave_of_rec(self, pix, x, y, radius, w, h):\n p = 0\n num = 0\n\n for i in range(x - radius, x + radius):\n if i < 1 or i > w:\n continue\n for j in range(y - radius, y + radius):\n if j <= 1 or j > h:\n continue\n # 排除极值点\n if pix[i, j] > 15 and pix[i, j] < 230:\n p += pix[i, j]\n num += 1\n return p // num\n\n # 中值去噪\n # 用噪点领域的中值代替噪点\n def mid_of_rec(self, pix, x, y, radius, w, h):\n p = []\n\n for i in range(x - radius, x + radius):\n if i < 1 or i > w:\n continue\n 
for j in range(y - radius, y + radius):\n if j <= 1 or j > h:\n continue\n # 排除极值点\n if pix[i, j] > 15 and pix[i, j] < 230:\n p.append(pix[i, j])\n\n p.sort()\n m = len(p) // 2\n '''\n if m <= 0:\n px = x + radius\n py = y + radius\n if px > w:\n px = w\n if py > h:\n py = h\n\n return pix[px, py]\n else:\n '''\n return (p[m] + p[-m]) // 2\n\n def clear_noise_mid(self):\n\n # 打开图片\n image = Image.open(\"./test.jpeg\").convert('L')\n img = Image.open(\"./原图.bmp\").convert('L')\n\n # 采用中值去噪\n self.clearNoise(image, 3, 9, self.mid_of_rec)\n\n # 保存图片\n image.save(\"./中值去噪.jpeg\")\n pix_1 = image.load()\n pix_2 = img.load()\n\n image.close()\n img.close()\n\n return (pix_1, pix_2, image.size)\n\n def clear_noise_ave(self):\n\n # 打开图片\n image = Image.open(\"./test.jpeg\").convert('L')\n img = Image.open(\"./原图.bmp\").convert('L')\n\n # 采用均值去噪\n self.clearNoise(image, 3, 9, self.ave_of_rec)\n\n # 保存图片\n image.save(\"./均值去噪.jpeg\")\n pix_1 = image.load()\n pix_2 = img.load()\n\n image.close()\n img.close()\n\n return (pix_1, pix_2, image.size)\n","sub_path":"features/spatial_denoising/clear_noise.py","file_name":"clear_noise.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"116841390","text":"import requests, bs4, sys, time, platform, webbrowser\r\nimport PySimpleGUI as sg\r\nfrom exports import scrapeGuide1, scrapeGuide2, scrapeGuide3, champions, window, checkbox, misspell_window\r\n\r\nMOBAFIRE_URL = 'https://mobafire.com'\r\n\r\n# ask for input\r\nwhile True:\r\n event, values = window.read()\r\n if event == 'Ok':\r\n champion = values[0].lower()\r\n break\r\n else:\r\n window.close()\r\n sys.exit()\r\nwindow.close()\r\n\r\n# time program if checkbox is checked\r\ntimeFlag = checkbox.Get()\r\nif timeFlag:\r\n startTime = time.time()\r\n\r\nif champion in champions:\r\n champion = champions[champion]\r\n\r\n# make request for mobafire\r\nmobafireLink = MOBAFIRE_URL + '/league-of-legends/' + champion + '-guide'\r\nmobaRes = requests.get(mobafireLink)\r\ntry:\r\n mobaRes.raise_for_status()\r\nexcept:\r\n misspell_window.read()\r\n sys.exit()\r\n\r\nmobaSoup = bs4.BeautifulSoup(mobaRes.text, 'html.parser')\r\n\r\n# get guide titles\r\ntitles = mobaSoup.select('.browse-list h3')\r\nguide1Title = titles[0].text\r\nguide2Title = titles[1].text\r\nguide3Title = titles[2].text\r\n\r\n# get guide links\r\nguides = mobaSoup.select('.browse-list a')\r\nguide1Link = MOBAFIRE_URL + guides[0].attrs['href']\r\nguide2Link = MOBAFIRE_URL + guides[1].attrs['href']\r\nguide3Link = MOBAFIRE_URL + guides[2].attrs['href']\r\n\r\n# scrape web pages\r\nresults = ['', '', '']\r\nscrapeGuide1(guide1Link, results)\r\nscrapeGuide2(guide2Link, results)\r\nscrapeGuide3(guide3Link, results)\r\n\r\n# write to html file and open\r\nhtmlFile = open('lol.html', 'wb')\r\nguide1 = '
<br>'.join(results[0].split('\\n'))\r\nguide2 = '<br>'.join(results[1].split('\\n'))\r\nguide3 = '<br>'.join(results[2].split('\\n'))\r\nhtml = '{} guides<br><br>{}<br><br>{}<br><br>{}<br><br>{}<br><br>{}<br><br>{}<br><br>
'.format(champion, guide1Title, guide1, guide2Title, guide2, guide3Title, guide3).encode('utf-8')\r\nhtmlFile.write(html)\r\nhtmlFile.close()\r\nwebbrowser.open('lol.html')\r\n\r\nendTime = time.time()\r\n\r\n# write time to times.txt\r\nif timeFlag:\r\n timeFile = open('times.txt', 'a')\r\n timeFile.write(platform.system() + ' (serial) ' + str(endTime-startTime) + '\\n')\r\n timeFile.close()\r\n import time_analysis\r\n","sub_path":"lols.py","file_name":"lols.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"266677394","text":"'程序运行主体'\nimport os\nimport re\nimport itchat\nimport helper\nfrom helper import Helper\nfrom ucas import EXCEPTIONS\n\nHELPER = Helper()\nADMIN_HELP = '''?data.csv? None\n?remind alive? None\n?user? None\n?save time? \\\\f\n?remind wait? \\\\f\n?remind before? \\\\f\n?course dict? \\\\d:\\\\d:\\\\d\n?send? 用户:\\\\s 内容:\\\\s'''\n\n@itchat.msg_register(itchat.content.TEXT)\ndef reply(msg):\n '回复函数'\n try:\n now_user = msg['FromUserName']\n text = msg['Text']\n user = itchat.search_friends(userName=now_user)\n nick_name = user['NickName']\n keys_1 = ['重新绑定', '取消绑定', '取消提醒', '打开提醒', '文字课表']\n keys_2 = ['绑定', '退课', '选课', '更新', '保存', '提醒', '课表']\n keys_3 = ['???', '???']\n if '?data.csv?' in text:\n itchat.send('@fil@data.csv', now_user)\n elif '?remind alive?' in text:\n if HELPER.remind_alive:\n HELPER.remind_alive = False\n else:\n HELPER.remind_alive = True\n HELPER.remind()\n itchat.send('remind_alive已更改', now_user)\n elif '?save time?' in text:\n helper.SAVE_TIME = float(re.findall(r'(\\d+\\.?\\d*)', text)[0])\n itchat.send('SAVE_TIME改为%f分钟' % helper.SAVE_TIME, now_user)\n elif '?remind wait?' in text:\n helper.REMIND_WAIT = float(re.findall(r'(\\d+\\.?\\d*)', text)[0])\n itchat.send('REMIND_WAIT改为%f分钟' % helper.REMIND_WAIT, now_user)\n elif '?remind before?' in text:\n helper.REMIND_BEFORE = float(re.findall(r'(\\d+\\.?\\d*)', text)[0])\n itchat.send('REMIND_BEFORE改为%f分钟' % helper.REMIND_BEFORE, now_user)\n elif '?course dict?' in text:\n result = re.findall(r'(\\d+):(\\d+):(\\d+)', text)[0]\n helper.COURSE_DICT[result[0]] = [int(result[1]), int(result[2])]\n itchat.send(\"COURSE_DICT['%d']改为(%d, %d)\" % result, now_user)\n elif '?send?' in text:\n result = re.findall(r'用户[::\\s]*(.+?)\\s*内容[::\\s]*(.*)$', text)\n HELPER.send(result[0][1], result[0][0])\n itchat.send('发送成功', now_user)\n elif '?user?' in text:\n itchat.send(', '.join([user['nick_name'] for user in HELPER.user_list]), now_user)\n elif '?admin?' in text:\n itchat.send(ADMIN_HELP, now_user)\n HELPER.admins = nick_name\n elif '重新绑定' in text:\n HELPER.change_user(now_user, nick_name, text)\n elif '取消绑定' in text:\n HELPER.del_user(now_user, nick_name)\n elif '取消提醒' in text:\n HELPER.cancel_remind(now_user, nick_name)\n elif '打开提醒' in text:\n HELPER.remind(now_user, nick_name)\n elif '???' in text:\n HELPER.help(now_user, [keys_2, keys_1, keys_3[:1]])\n elif '???' 
in text:\n HELPER.help(now_user, [keys_2, keys_1, keys_3[:1]])\n elif '文字课表' in text:\n if '编号' in text:\n HELPER.show_course_list(now_user, nick_name, False, is_with_num=True)\n else:\n HELPER.show_course_list(now_user, nick_name, False)\n elif '保存' in text:\n HELPER.save_user_list(now_user)\n elif '退课' in text:\n HELPER.drop_course(now_user, nick_name, text)\n elif '选课' in text:\n HELPER.add_course(now_user, nick_name, text)\n elif '更新' in text:\n HELPER.remind_list_update(now_user, nick_name)\n elif '提醒' in text:\n HELPER.show_remind_list(now_user, nick_name)\n elif '课表' in text:\n HELPER.show_course_list(now_user, nick_name)\n elif '绑定' in text:\n HELPER.add_user(now_user, nick_name, text)\n else:\n itchat.send(Helper.get_response(text), now_user)\n except EXCEPTIONS as error:\n HELPER.my_error(error, now_user, False)\n\n@itchat.msg_register(itchat.content.FRIENDS)\ndef add_friend(msg):\n '自动接受好友申请'\n itchat.add_friend(**msg['Text'])\n itchat.send_msg('Nice to meet you!', msg['RecommendInfo']['UserName'])\n itchat.send('你可以试着输入\"???\"来查看帮助信息', msg['RecommendInfo']['UserName'])\n\ndef main(hot=True):\n '开始运行'\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n PIC_DIR = os.path.join(BASE_DIR, 'QR.jpg')\n itchat.auto_login(picDir=PIC_DIR, hotReload=hot)\n HELPER.friends = itchat.get_friends(update=True)\n HELPER.remind()\n HELPER.auto_save()\n itchat.run()\n\nif __name__ == '__main__':\n try:\n main()\n except EXCEPTIONS as error:\n HELPER.my_error(error)\n HELPER.save_user_list()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"378763555","text":"import socket\n \ndef Main():\n while True:\n host = '192.168.43.125'\n port = 21567\n\n message = input(\" -> \") \n mySocket = socket.socket()\n mySocket.connect((host,port))\n mySocket.send(message.encode())\n mySocket.close()\n \nif __name__ == '__main__':\n Main()\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"409433790","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 16 12:40:46 2015\n\n@author: mfj\n\"\"\"\n\nSTATUS_LIST = 0\nSTATUS_STASH = 1\n\nLOC_FRIDGE = 0\nLOC_FREEZER = 1\nLOC_PANTRY = 2\nLOC_NONE = 3\n\nLOCATION_STRINGS = {\n LOC_FRIDGE: \"Fridge\",\n LOC_FREEZER: \"Freezer\",\n LOC_PANTRY: \"Pantry\",\n LOC_NONE: \"-\"\n}\n\nSTATUS_STRINGS = {\n STATUS_LIST: \"Shopping List\",\n STATUS_STASH: \"Stash\"\n}","sub_path":"fridge/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"230214472","text":"##################################################\n# Script for docking DUD-E compounds in parallel #\n##################################################\n\n# Usage: python dock.py num_processes\n# e.g. 
dock.py DUD-E/all/ampc/ 64\n\nfrom multiprocessing import Pool\nimport glob\nfrom os import path\nfrom subprocess import call\nimport sys\nimport time\n\nSMINA_PATH=\"./smina\"\n\n\ndef dock(ligand):\n \"\"\"Function that calls smina to dock a ligand.\"\"\"\n ligname = path.splitext(ligand)[0]\n # Call vina process\n call([SMINA_PATH, \"-r\", receptor, \"-l\", ligand, \"--autobox_ligand\",\n crystal_ligand, \"--num_modes\", str(num_modes), \"-o\",\n ligname + \"_out.pdbqt\", \"--cpu\", \"1\"])\n\n\nif __name__ == \"__main__\":\n # User inputs\n target_path = sys.argv[1].rstrip(\"/\")\n n_cpus = int(sys.argv[2])\n # Docking parameters\n receptor = target_path + \"/receptor.pdbqt\"\n crystal_ligand = target_path + \"/crystal_ligand.mol2\"\n ligands = glob.glob(target_path + \"/*/*.pdbqt\")\n num_modes = 12\n start_time = time.time()\n # Start process pool\n with Pool(n_cpus) as pool:\n pool.map(dock, ligands, chunksize=10)\n stop_time = time.time()\n print(\"Job completed in:\", stop_time - start_time, \"seconds\")\n","sub_path":"docking/dock.py","file_name":"dock.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"194245497","text":"#!flask/bin/python\n\n\"\"\"\nSan Francisco Movie Locations, Web Application\n:author Jordan Limbach \n\"\"\"\n\nimport json\nimport requests\nfrom geopy.geocoders import GoogleV3\nfrom flask import Flask, jsonify, request\n\n\nconfig_file = open('config.json', 'r')\nconfig = json.load(config_file)\nconfig_file.close()\n\napp = Flask(__name__, static_url_path=None, static_folder='app')\nsf_movie_ep = ':'.join([config['sf_movie_service_host'],\n str(config['sf_movie_service_port'])])\ngeolocator = GoogleV3()\n\n\n@app.route('/')\ndef index():\n \"\"\" Returns single page application \"\"\"\n return app.send_static_file('index.html')\n\n\n@app.route('/assets/')\ndef static_proxy(path):\n \"\"\" Returns static assets \"\"\"\n asset = '/'.join(['assets', path])\n return app.send_static_file(asset)\n\n\n@app.route('/get_movie/', methods=['GET'])\ndef get_movie(movie_title):\n \"\"\" Gets a particular movie by title \"\"\"\n limit = request.args.get('limit', '') # An optional limit may be passed\n ep = build_endpoint('movie', title=movie_title, limit=limit)\n response = parse_response(requests.get(ep))\n if (response['code'] == 200): # On a successful response, we'll need to geocode addresses\n response['data'] = geocode_addresses(response['data'])\n return jsonify(response)\n\n\n@app.route('/get_movies/')\ndef get_movies(search_term=''):\n \"\"\" Gets a list of movies that match a particular search term \"\"\"\n limit = request.args.get('limit', '') # An optional limit may be passed\n\n # Our application only needs title field for the drop down\n ep = build_endpoint('movies', fields='title', limit=limit)\n response = parse_response(requests.get(ep))\n return jsonify(response)\n\n\ndef build_endpoint(action, fields='', search='', title='', limit=''):\n \"\"\" Builds an endpoint for the movie search API \"\"\"\n api_ep = '{}/{}/{}?fields={}&search={}&title={}'.format(\n sf_movie_ep, action, title, fields, search, title)\n if limit != '':\n api_ep += '&limit={}'.format(limit)\n return api_ep\n # Example: http://localhost:10000/get_movies/?fields=title,location&search=&title=\n\n\ndef parse_response(response):\n \"\"\" Parses a response from the movie search API \"\"\"\n try:\n response = response.json()\n except Exception:\n return {'error': response.text, 'code': 400}\n 
return response\n\n\ndef geocode_addresses(data):\n \"\"\" Geocodes movie addresses \"\"\"\n if isinstance(data, dict): # Single location\n print(data)\n geocoded = get_geocode(data['locations'])\n if geocoded is not None:\n address, (latitude, longitude) = geocoded\n data['markers'] = [{\n 'id': 1, # Marker requires an ID\n 'name': data['locations'] if data['locations'] is not None else 'None given',\n 'location': {\n 'latitude': latitude if data['locations'] is not None else '',\n 'longitude': longitude if data['locations'] is not None else ''\n }\n }]\n return data\n\n elif isinstance(data, list): # Multiple locations\n movie, i = data[0], 1 # Take the first object for movie data\n movie['markers'] = []\n for m in data:\n geocoded = get_geocode(m['locations'])\n if geocoded is not None:\n address, (latitude, longitude) = geocoded\n movie['markers'].append({\n 'id': i,\n 'name': m['locations'] if m['locations'] is not None else 'None given',\n 'location': {\n 'latitude': latitude if m['locations'] is not None else '',\n 'longitude': longitude if m['locations'] is not None else ''\n }\n })\n i += 1\n return movie\n\n\ndef get_geocode(address):\n \"\"\" Uses geolocator to geocode address \"\"\"\n return geolocator.geocode('{}, {}, {}'.format(\n address, 'San Francisco', 'CA'))\n\n\nif __name__ == '__main__':\n app.run(host=config['movie_app_host'],\n port=config['movie_app_port'],\n debug=['movie_app_debug'])","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"262305903","text":"class Sample:\n\n \"\"\"A representation of a sample obtained from IRIDA\"\"\"\n\n def __init__(self, name, paired_path, unpaired_path):\n \"\"\"\n Initialize a sample instance\n\n :type name: str\n :param name: the name of the sample\n :type path: str\n :param path: the URI to obtain the sample from IRIDA\n \"\"\"\n\n self.name = name\n self.paired_path = paired_path\n self.unpaired_path = unpaired_path\n self._sample_reads = [] # A list of SampleFile/SamplePair objects\n\n def __repr__(self):\n num_files = 0\n for item in self.get_files():\n try:\n for _file in item:\n num_files += 1\n except TypeError:\n num_files += 1\n\n return_string = self.name + \":\\n\"\n return_string += \"\\tPaired path: \" + self.paired_path + \"\\n\"\n return_string += \"\\tSingles path: \" + self.unpaired_path + \"\\n\"\n return_string += \"\\tNumber of files: \" + str(num_files) + \"\\n\"\n return return_string\n\n def add_file(self, new_file):\n self._sample_reads.append(new_file)\n\n def add_pair(self, pair):\n self.add_file(pair)\n\n def get_reads(self):\n return self._sample_reads\n","sub_path":"irida_import/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"371306842","text":"\nclass Student:\n def __init__(self, name, surname, gender):\n self.name = name\n self.surname = surname\n self.gender = gender\n self.finished_courses = list()\n self.courses_in_progress = list()\n self.grades = dict()\n\n def rate_lecturer(self, lecturer, course, grade):\n if isinstance(lecturer,\n Lecturer) and course in lecturer.courses_attached and course in self.courses_in_progress:\n if grade > 10:\n grade = 10\n elif grade < 1:\n grade = 1\n if course in lecturer.grades:\n lecturer.grades[course] += [grade]\n else:\n lecturer.grades[course] = [grade]\n else:\n return 'Ошибка'\n\n def 
get_average(self, course=None):\n grades_count = 0\n average = 0\n if course:\n if course in self.grades:\n for grade in self.grades[course]:\n grades_count += 1\n average += grade\n if grades_count:\n return average / grades_count\n else:\n for course in self.grades:\n for grade in self.grades[course]:\n grades_count += 1\n average += grade\n if grades_count:\n return average / grades_count\n return average\n\n def __str__(self):\n result = 'Имя: ' + self.name + '\\n'\n result += 'Фамилия: ' + self.surname + '\\n'\n result += 'Средняя оценка за домашние задание: ' + str(self.get_average()) + '\\n'\n result += 'Курсы в процессе изучения: ' + ', '.join(self.courses_in_progress) + '\\n'\n result += 'Завершенные курсы: ' + ', '.join(self.finished_courses) + '\\n'\n return result\n\n def __lt__(self, other):\n return self.get_average() < other.get_average()\n\n def __le__(self, other):\n return self.get_average() <= other.get_average()\n\n def __gt__(self, other):\n return self.get_average() > other.get_average()\n\n def __ge__(self, other):\n return self.get_average() >= other.get_average()\n\n def __eq__(self, other):\n return self.get_average() == other.get_average()\n\n def __ne__(self, other):\n return self.get_average() != other.get_average()\n\n\nclass Mentor:\n def __init__(self, name, surname):\n self.name = name\n self.surname = surname\n self.courses_attached = []\n\n\nclass Lecturer(Mentor):\n def __init__(self, name, surname):\n super().__init__(name, surname)\n self.grades = {}\n\n def get_average(self, course=None):\n grades_count = 0\n average = 0\n if course:\n if course in self.grades:\n for grade in self.grades[course]:\n grades_count += 1\n average += grade\n if grades_count:\n return average / grades_count\n else:\n for course in self.grades:\n for grade in self.grades[course]:\n grades_count += 1\n average += grade\n if grades_count:\n return average / grades_count\n return average\n\n def __str__(self):\n result = 'Имя: ' + self.name + '\\n'\n result += 'Фамилия: ' + self.surname + '\\n'\n result += 'Закрепленные лекции: ' + ', '.join(self.courses_attached) + '\\n'\n result += 'Средняя оценка за лекции: ' + str(self.get_average()) + '\\n'\n return result\n\n def __lt__(self, other):\n return self.get_average() < other.get_average()\n\n def __le__(self, other):\n return self.get_average() <= other.get_average()\n\n def __gt__(self, other):\n return self.get_average() > other.get_average()\n\n def __ge__(self, other):\n return self.get_average() >= other.get_average()\n\n def __eq__(self, other):\n return self.get_average() == other.get_average()\n\n def __ne__(self, other):\n return self.get_average() != other.get_average()\n\n\nclass Reviewer(Mentor):\n def __init__(self, name, surname):\n super().__init__(name, surname)\n\n def rate_student(self, student, course, grade):\n if isinstance(student, Student) and course in self.courses_attached and course in student.courses_in_progress:\n if grade > 10:\n grade = 10\n elif grade < 1:\n grade = 1\n if course in student.grades:\n student.grades[course] += [grade]\n else:\n student.grades[course] = [grade]\n else:\n return 'Ошибка'\n\n def __str__(self):\n result = 'Имя: ' + self.name + '\\n'\n result += 'Фамилия: ' + self.surname + '\\n'\n return result\n\n\n#\ndef get_averages(instances, course):\n averages_count = 0\n average = 0\n for instance in instances:\n averages_count += 1\n average += instance.get_average(course)\n if averages_count:\n return average / averages_count\n return average\n\n\ndef 
students_average(students, course):\n return get_averages(students, course)\n\n\ndef lecturers_average(lecturers, course):\n return get_averages(lecturers, course)\n\n\ns = list()\ns.append(Student('Петр', 'Александров', 'м'))\ns.append(Student('Александра', 'Петрова', 'ж'))\n\nl = list()\nl.append(Lecturer('Александр', 'Пушкин'))\nl.append(Lecturer('Дмитрий', 'Менделеев'))\n\nr = list()\nr.append(Reviewer('Владимир', 'Путин'))\nr.append(Reviewer('Дмитрий', 'Медведев'))\n\ns[0].courses_in_progress.append('Литература')\ns[0].courses_in_progress.append('Химия')\ns[0].finished_courses.append('Математика')\n\ns[1].courses_in_progress.append('Литература')\ns[1].courses_in_progress.append('Химия')\ns[1].finished_courses.append('Математика')\n\nl[0].courses_attached.append('Литература')\nl[1].courses_attached.append('Химия')\n\nr[0].courses_attached.append('Литература')\nr[0].courses_attached.append('Химия')\nr[1].courses_attached.append('Литература')\nr[1].courses_attached.append('Химия')\n\ns[0].rate_lecturer(l[0], 'Литература', 10)\ns[0].rate_lecturer(l[1], 'Химия', 9)\n\ns[1].rate_lecturer(l[0], 'Литература', 9)\ns[1].rate_lecturer(l[1], 'Химия', 8)\n\nr[0].rate_student(s[0], 'Литература', 5)\nr[0].rate_student(s[1], 'Химия', 4)\n\nr[1].rate_student(s[0], 'Химия', 2)\nr[1].rate_student(s[1], 'Литература', 3)\n\nprint(s[0].grades)\nprint(s[1].grades)\n\nprint(*s, sep='\\n')\nprint(*l, sep='\\n')\nprint(*r, sep='\\n')\n\nprint('Средняя оценка студентов по литературе:', students_average(s, 'Литература'))\nprint('Средняя оценка студентов по химии:', students_average(s, 'Химия'))\nprint('Средняя оценка лекторов по литературе:', lecturers_average(l, 'Литература'))\nprint('Средняя оценка лекторов по химии:', lecturers_average(l, 'Химия'))\n\nprint('студент 1 < студент 2:', s[0] < s[1])\nprint('студент 1 <= студент 2:', s[0] <= s[1])\nprint('студент 1 > студент 2:', s[0] > s[1])\nprint('студент 1 >= студент 2:', s[0] >= s[1])\nprint('студент 1 == студент 2:', s[0] == s[1])\nprint('студент 1 != студент 2:', s[0] != s[1])\n\nprint('лектор 1 < лектор 2:', s[0] < s[1])\nprint('лектор 1 <= лектор 2:', s[0] <= s[1])\nprint('лектор 1 > лектор 2:', s[0] > s[1])\nprint('лектор 1 >= лектор 2:', s[0] >= s[1])\nprint('лектор 1 == лектор 2:', s[0] == s[1])\nprint('лектор 1 != лектор 2:', s[0] != s[1])","sub_path":"students_and_mentor.py","file_name":"students_and_mentor.py","file_ext":"py","file_size_in_byte":7997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"167604817","text":"import socket\nimport select\nfrom handler import SEHandler\n \nssocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nssocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nssocket.bind(('127.0.0.1', 8181))\nssocket.listen(1)\nssocket.setblocking(0)\n\nepoll = select.epoll()\nepoll.register(ssocket.fileno(), select.EPOLLIN)\n \ntry:\n handlers = {}\n while True:\n events = epoll.poll(1)\n for fileno, event in events:\n if fileno == ssocket.fileno():\n connection, address = ssocket.accept()\n connection.setblocking(0)\n epoll.register(connection.fileno(), select.EPOLLIN)\n handlers[connection.fileno()] = SEHandler(connection, epoll)\n\n elif event & select.EPOLLIN:\n handlers[fileno].pollin()\n\n elif event & select.EPOLLOUT:\n handlers[fileno].pollout()\n \n elif event & select.EPOLLHUP:\n handlers[fileno].pollhup()\n del handlers[fileno]\nfinally:\n epoll.unregister(ssocket.fileno())\n epoll.close()\n 
ssocket.close()","sub_path":"ridlab-server.py","file_name":"ridlab-server.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"2243117","text":"from rest_framework.pagination import PageNumberPagination\r\nfrom rest_framework.response import Response\r\nfrom collections import OrderedDict\r\n\r\nclass MyPaginationMixin(object):\r\n\r\n @property\r\n def paginator(self):\r\n \"\"\"\r\n The paginator instance associated with the view, or `None`.\r\n \"\"\"\r\n if not hasattr(self, '_paginator'):\r\n if self.pagination_class is None:\r\n self._paginator = None\r\n else:\r\n self._paginator = self.pagination_class()\r\n return self._paginator\r\n\r\n def paginate_queryset(self, queryset):\r\n \"\"\"\r\n Return a single page of results, or `None` if pagination\r\n is disabled.\r\n \"\"\"\r\n if self.paginator is None:\r\n return None\r\n return self.paginator.paginate_queryset(\r\n queryset, self.request, view=self)\r\n\r\n def get_paginated_response(self, data):\r\n \"\"\"\r\n Return a paginated style `Response` object for the given\r\n output data.\r\n \"\"\"\r\n assert self.paginator is not None\r\n return self.paginator.get_paginated_response(data)\r\n\r\nclass MyPageNumberPagination(PageNumberPagination):\r\n page_size = 200\r\n\r\n def get_paginated_response(self, data):\r\n return Response(OrderedDict([\r\n ('total_page',self.page.paginator.num_pages),\r\n ('page_size', self.page_size),\r\n ('current_page', self.page.number),\r\n ('total_items',self.page.paginator.count),\r\n ('next', self.get_next_link()),\r\n ('previous', self.get_previous_link()),\r\n ('results', data)\r\n ]))\r\n\r\nclass TimelinePagination(PageNumberPagination):\r\n page_size = 5\r\n\r\n def get_paginated_response(self, data):\r\n return Response(OrderedDict([\r\n ('next', self.get_next_link()),\r\n ('results', data)\r\n ]))\r\n","sub_path":"core/utils/pagination.py","file_name":"pagination.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"534503147","text":"from __future__ import print_function\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport time\nimport torch.nn.functional as F\nfrom numpy import *\nfrom dataset import train_dataset\nfrom models.Nets2D import MSFCN2D\nfrom early_stopping import EarlyStopping\nfrom measure import SegmentationMetric\n\npath = '2017'\nif path == '2017':\n time_series = 7\nelse:\n time_series = 4\nbatch_size = 1\nniter = 100\ninput_channel = 4\nclass_num = 4\nlearning_rate = 0.0001\nbeta1 = 0.5\ncuda = True\nnum_workers = 1\nsize_h = 256\nsize_w = 256\nflip = 0\nnet = MSFCN2D(time_series, 4, 4)\ndata_path = './' + path + 'data/train'\nval_path = './' + path + 'data/val'\nout_file = './checkpoint/' + net.name\nsave_epoch = 1\ntest_step = 300\nlog_step = 1\nnum_GPU = 1\npre_trained = True\n\ntorch.cuda.set_device(0)\n\ntry:\n os.makedirs(out_file)\n os.makedirs(out_file + '/model/')\nexcept OSError:\n pass\n\nmanual_seed = random.randint(1, 10000)\nrandom.seed(manual_seed)\ntorch.manual_seed(manual_seed)\ncudnn.benchmark = True\n\ntrain_datatset_ = train_dataset(data_path, size_w, size_h, flip, time_series)\nval_datatset_ = train_dataset(val_path, size_w, size_h, 0, time_series)\n\n\ndef weights_init(m):\n class_name = m.__class__.__name__\n if class_name.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n m.bias.data.fill_(0)\n elif 
class_name.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\ntry:\n os.makedirs(out_file)\n os.makedirs(out_file + '/model/')\nexcept OSError:\n pass\nif cuda:\n net.cuda()\nif num_GPU > 1:\n net = nn.DataParallel(net)\n\nif pre_trained:\n net.load_state_dict(torch.load('%s/model/' % out_file + path + 'netG.pth'))\n # print('Load success!')\nelse:\n pass\n # net.apply(weights_init)\n\n########### LOSS & OPTIMIZER ##########\ncriterion = nn.CrossEntropyLoss(ignore_index=255)\noptimizer = torch.optim.Adam(net.parameters(), lr=learning_rate, betas=(beta1, 0.999))\nmetric = SegmentationMetric(4)\nearly_stopping = EarlyStopping(patience=7, verbose=True)\n\nif __name__ == '__main__':\n start = time.time()\n net.train()\n for epoch in range(1, niter+1):\n lr_adjust = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 10, eta_min=0.0, last_epoch=-1)\n for i in range(0, train_datatset_.__len__(), batch_size):\n train_datatset_next = train_datatset_.__next__()\n train_loader = torch.utils.data.DataLoader(dataset=train_datatset_next, batch_size=batch_size, shuffle=True,\n num_workers=num_workers)\n for initial_image, semantic_image in train_loader:\n # print(initial_image.shape)\n initial_image = torch.reshape(initial_image, (batch_size, time_series*4, 256, 256))\n initial_image = initial_image.cuda()\n semantic_image = semantic_image.cuda()\n\n semantic_image_pred = net(initial_image)\n\n loss = criterion(semantic_image_pred, semantic_image.long())\n # print(loss)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n lr_adjust.step()\n print('[%d/%d][%d/%d] Loss: %.4f' %\n (epoch, niter, i, len(train_loader) * batch_size, loss.item()))\n\n for i in range(0, val_datatset_.__len__(), batch_size):\n with torch.no_grad():\n net.eval()\n val_datatset_next = val_datatset_.__next__()\n val_loader = torch.utils.data.DataLoader(dataset=val_datatset_next, batch_size=batch_size, shuffle=True,\n num_workers=num_workers)\n for initial_image, semantic_image in val_loader:\n # print(initial_image.shape)\n initial_image = torch.reshape(initial_image, (batch_size, time_series * 4, 256, 256))\n initial_image = initial_image.cuda()\n semantic_image = semantic_image.cuda()\n\n semantic_image_pred = net(initial_image)\n\n loss = criterion(semantic_image_pred, semantic_image.long())\n semantic_image_pred = F.softmax(semantic_image_pred.squeeze(), dim=0)\n semantic_image_pred = semantic_image_pred.argmax(dim=0)\n\n semantic_image = torch.squeeze(semantic_image.cpu(), 0)\n semantic_image_pred = torch.squeeze(semantic_image_pred.cpu(), 0)\n\n metric.addBatch(semantic_image_pred, semantic_image)\n acc = metric.pixelAccuracy()\n mIoU = metric.meanIntersectionOverUnion()\n kappa = metric.kappa()\n print('acc: ', acc)\n print('mIoU: ', mIoU)\n print('kappa', kappa)\n metric.reset()\n net.train()\n\n early_stopping(1 - mIoU, net, '%s/model/' % out_file + path + 'netG.pth')\n\n if early_stopping.early_stop:\n break\n\n end = time.time()\n print('Program processed ', end - start, 's, ', (end - start)/60, 'min, ', (end - start)/3600, 'h')","sub_path":"train_2DMSFCN.py","file_name":"train_2DMSFCN.py","file_ext":"py","file_size_in_byte":5146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"52409872","text":"import deathbycaptcha\r\nfrom PIL import Image\r\nimport string\r\nimport random\r\nimport os\r\nimport time\r\nimport sys\r\n\r\n\r\n\r\ndef wait_start(runTime, action):\r\n print('Esperando hasta las 3 AM')\r\n startTime = 
time(*(map(int, runTime.split(':'))))\r\n while startTime > datetime.today().time(): # you can add here any additional variable to break loop if necessary\r\n sleep(1)# you can change 1 sec interval to any other\r\n return\r\n\r\ndef getdata(partida,browser,data):\r\n resuelto = False\r\n contador = 0\r\n while resuelto == False:\r\n try:\r\n a = browser.find_element_by_xpath('//*[@id=\"cuerpo\"]/div[2]/div[1]/table/tbody/tr[1]/td/div').text\r\n if str(a) == \"Horario reservado para tareas de mantenimiento del sistema.\":\r\n print(\"Horario de mantenimiento vuelva mas tarde, Esperando\")\r\n wait_start('3:00', lambda: act(100))\r\n resuelto = False\r\n elif str(a) == \"Verifique el código de seguridad.\":\r\n print (\"Error de captcha en la partida \" + partida + \"- Reintentando\")\r\n resuelto = False\r\n return \"Error 5\"\r\n elif str(a) == \"Error en sistema, intente más tarde. Si persiste, contáctenos\":\r\n print (\"Partida \" + partida + \" Genero error en el sistema - Esperando 10 minutos\")\r\n contador += 1\r\n if contador == 3:\r\n print (\"Reintentando 3 veces - Saliendo del programa\")\r\n sys.exit()\r\n time.sleep(600)\r\n else:\r\n resuelto = True\r\n except:\r\n try:\r\n situ = browser.find_element_by_xpath('//*[@id=\"frm_liq\"]/table/tbody/tr[2]/td/fieldset/table/tbody/tr/td[2]/b').text\r\n texto = browser.find_element_by_xpath('//*[@id=\"frm_liq\"]/table/tbody/tr[2]/td/fieldset/table/tbody/tr/td[1]').text\r\n #data = {}\r\n cuentalinea = 0\r\n datos = str(texto)\r\n for linea in datos.splitlines():\r\n strsal = \"\"\r\n if cuentalinea == 0:\r\n data[\"cuit\"] = linea[-13:]\r\n #cuit = linea[-13:]\r\n elif cuentalinea == 2:\r\n largo = len(linea)\r\n ppio = len('Ubicación del inmueble: ')\r\n tot = largo - ppio\r\n tot = tot * -1\r\n ubi = linea[tot:]\r\n data[\"ubi\"] = ubi\r\n strsal = strsal + ubi\r\n elif cuentalinea == 3:\r\n zona = len('Zona: ')\r\n prop = ' Tipo Propietario: '\r\n lonprop = len(prop)\r\n posta = linea.find(prop)\r\n zon = linea[zona:posta]\r\n data[\"zona\"]= zon\r\n #print(\"zona \" + zon)\r\n largo = len(linea)\r\n alfa = posta + lonprop\r\n desde = largo - alfa\r\n desde = desde * -1\r\n prop = linea[desde:]\r\n data[\"prop\"] = prop\r\n cuentalinea = cuentalinea + 1\r\n resuelto = True\r\n #\tstrsal = cuit + \";\" + ubi +\";\" + zon + \";\" + prop + \";\" + sit\r\n data[\"cap\"] = situ\r\n print ('Datos de partida ' + partida + ' OK')\r\n return data\r\n except:\r\n print ('No se puede obtener la info. 
Saliendo del programa')\r\n sys.exit()\r\n","sub_path":"api-old.py","file_name":"api-old.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"36871580","text":"from PIL import Image\nfrom PIL import ImageFilter\nimport pytesseract\nimport pickle\n\n\nclass SsProcessor:\n\n def __init__(self, debug_level=0):\n self.debug_level = debug_level\n\n def log(self, *message):\n # meant for logging to a file ONLY\n # if self.debug_level == 1:\n\n #log to a file and print\n #TODO: file logging\n if self.debug_level == 2:\n print(\"[DEBUG2] %s\" % list(message))\n\n def show_image(self, image):\n if self.debug_level == 2:\n image.show()\n\n def get_item_images(self, filename, target_item, zoom):\n \"\"\"\n Takes a full screenshot, returns 3 small images: item count, name, and buy price\n \"\"\"\n img_file = Image.open(filename)\n crop_count = img_file.crop((783, 409+(target_item*44), 804, 420+(target_item*44)))\n crop_count = crop_count.resize((crop_count.size[0]*zoom, crop_count.size[1]*zoom), Image.ANTIALIAS)\n crop_name = img_file.crop((814, 400+(target_item*44), 955, 415+(target_item*44)))\n crop_name = crop_name.resize((crop_name.size[0]*zoom, crop_name.size[1]*zoom), Image.ANTIALIAS)\n crop_buy = img_file.crop((1201, 407+(target_item*44), 1365, 423+(target_item*44)))\n crop_buy = crop_buy.resize((crop_buy.size[0]*zoom, crop_buy.size[1]*zoom), Image.ANTIALIAS)\n\n return crop_count, crop_name, crop_buy\n\n def get_value(self, image, digit=False, single_digit = False, use_unsharp=False, us_radius=2, us_percent=200, us_threshold=2):\n config_string = ''\n\n if use_unsharp:\n image = image.filter(ImageFilter.UnsharpMask(radius=us_radius, percent=us_percent, threshold=us_threshold))\n\n if digit:\n config_string += ' --user-words digits'\n\n if single_digit:\n config_string += ' -psm 10'\n\n value = pytesseract.image_to_string(image, config=config_string)\n\n return value\n\n def get_item_count(self, image):\n \"\"\"\n Takes get_item_image[0] to get the item count, returns the item count as a string\n \"\"\"\n item_count_image = image.copy()\n\n def get_buy_price(self, image):\n \"\"\"\n Takes get_item_image[2] to return a tuple of gold,silver,copper values\n \"\"\"\n #Get the full buy price and return gold, silver, copper\n r_tuple = (160, 20)\n g_tuple = (160, 20)\n b_tuple = (160, 20)\n\n clean_image = self.isolate_colors(image, r_tuple, g_tuple, b_tuple)\n clean_bw = self.intensify(clean_image, 220)\n value_list = self.slice_characters(clean_bw, 20, 150)\n\n gold_value = \"\"\n silver_value = \"\"\n copper_value = \"\"\n\n gold_exists, silver_exists, copper_exists = self.what_coins(image)\n\n if gold_exists:\n g_image = value_list[0].copy()\n\n if g_image.size[0] < 200:\n self.log(\"Doubling gold image.\")\n bigger_image = Image.new(\"RGB\", (g_image.size[0]*2, g_image.size[1]))\n bigger_image.paste(g_image, (0, 0))\n bigger_image.paste(g_image, (g_image.size[0], 0))\n self.show_image(bigger_image)\n gold_value = self.get_value(bigger_image, digit=True)\n gold_value = gold_value[:int(len(gold_value)/2)]\n\n else:\n self.show_image(g_image)\n gold_value = self.get_value(g_image, digit=True)\n\n del(value_list[0])\n\n if silver_exists:\n s_image = value_list[0].copy()\n\n if s_image.size[0] < 200:\n self.log(\"Doubling silver image.\")\n bigger_image = Image.new(\"RGB\", (s_image.size[0]*2, s_image.size[1]))\n bigger_image.paste(s_image, (0, 0))\n bigger_image.paste(s_image, (s_image.size[0], 0))\n 
self.show_image(bigger_image)\n silver_value = self.get_value(bigger_image, digit=True)\n silver_value = silver_value[:int(len(silver_value)/2)]\n\n else:\n self.show_image(s_image)\n silver_value = self.get_value(s_image, digit=True)\n\n del(value_list[0])\n\n if copper_exists:\n c_image = value_list[0].copy()\n\n if c_image.size[0] < 200:\n self.log(\"Doubling copper image.\")\n bigger_image = Image.new(\"RGB\", (c_image.size[0]*2, c_image.size[1]))\n bigger_image.paste(c_image, (0, 0))\n bigger_image.paste(c_image, (c_image.size[0], 0))\n self.show_image(bigger_image)\n copper_value = self.get_value(bigger_image, digit=True)\n copper_value = copper_value[:int(len(copper_value)/2)]\n\n else:\n self.show_image(c_image)\n copper_value = self.get_value(c_image, digit=True)\n\n return gold_value, silver_value, copper_value\n\n def isolate_colors(self, image, r_tuple, g_tuple, b_tuple):\n \"\"\"\n Used to isolate text colors of the cost values\n Returns a clean image with only the colors requested in the tuple.\n \"\"\"\n new_image = image.copy()\n r_hi, r_lo = r_tuple[0], r_tuple[1]\n g_hi, g_lo = g_tuple[0], g_tuple[1]\n b_hi, b_lo = b_tuple[0], b_tuple[1]\n\n pixels = new_image.load()\n\n for i in range(new_image.size[0]):\n for j in range(new_image.size[1]):\n\n if abs(pixels[i, j][0] - pixels[i, j][1]) < 8:\n pixels[i, j] = (255, 255, 255)\n\n if pixels[i, j][0] > pixels[i, j][1]:\n pixels[i, j] = (255, 255, 255)\n\n if (r_lo < pixels[i, j][0] < r_hi) and (g_lo < pixels[i, j][1] < g_hi) and (b_lo < pixels[i, j][2] < b_hi):\n pixels[i, j] = pixels[i, j]\n else:\n pixels[i, j] = (255, 255, 255)\n\n return new_image\n\n def isolate_whites(self, image):\n \"\"\"\n Used to isolate whites of count value\n returns clean image with only count value image\n \"\"\"\n #TODO: Implement this.\n print()\n\n def what_coins(self, image):\n new_image = image.copy()\n pixels = new_image.load()\n\n gold = False\n silver = False\n bronze = False\n\n gold_x = None\n silver_x = None\n bronze_x = None\n\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n if (150 < pixels[i, j][0] < 165) and (89 < pixels[i, j][1] < 95) and (6 < pixels[i, j][2] < 17):\n self.log(\"GOLD PIXEL:\", i, j, pixels[i, j])\n gold_x = i\n gold = True\n if (92 < pixels[i, j][0] < 102) and (89 < pixels[i, j][1] < 98) and (91 < pixels[i, j][2] < 107):\n silver = True\n self.log(\"SILVER PIXEL:\", i, j, pixels[i, j])\n if (108 < pixels[i, j][0] < 120) and (62 < pixels[i, j][1] < 67) and (27 < pixels[i, j][2] < 35):\n if gold_x is not None:\n if abs(i - gold_x) < 250:\n self.log(\"FALSE POSITIVE COPPER PIXEL:\", i, j, pixels[i, j])\n bronze = False\n else:\n bronze = True\n self.log(\"COPPER PIXEL:\", i, j, pixels[i, j])\n else:\n self.log(\"COPPER PIXEL:\", i, j, pixels[i, j])\n bronze = True\n\n return gold, silver, bronze\n\n def get_coin_colors(self, image, left, top, right, bottom):\n pix = 0\n coin = image.crop((left, top, right, bottom))\n self.show_image(coin)\n color_list = []\n pixels = coin.load()\n\n for i in range(coin.size[0]):\n for j in range(coin.size[1]):\n color_list.append(pixels[i, j])\n pix += 1\n\n self.log(pix)\n return set(color_list)\n\n def replace_whites(self, image, target_white):\n image = image.convert('L')\n pixels = image.load()\n\n for i in range(image.size[0]):\n for j in range(image.size[1]):\n if pixels[i, j] < int(target_white):\n pixels[i, j] = 0\n return image\n\n def intensify(self, image, target_color):\n image = image.convert('L')\n pixels = image.load()\n\n for i in 
range(image.size[0]):\n for j in range(image.size[1]):\n if pixels[i, j] > int(target_color):\n pixels[i, j] = 255\n if pixels[i, j] < int(target_color):\n pixels[i, j] = 0\n\n return image\n\n def remove_extra_white(self, image, threshold, buffer):\n pixels = image.load()\n x_trim = 0\n start = 0\n\n for i in range(image.size[0]):\n black_found = False\n\n for j in range(image.size[1]):\n if pixels[i, j] <= threshold:\n black_found = True\n\n if black_found:\n if start == 0:\n start = i\n x_trim = i\n\n image = image.crop((start, 0, x_trim+buffer, image.size[1]))\n\n pixels = image.load()\n\n return image\n\n def slice_characters(self, image, buffer, max_dist):\n pixels = image.load()\n\n start = 0\n end_crop = 0\n\n white_count = 0\n\n return_images = []\n\n for i in range(image.size[0]):\n black = False\n\n for j in range(image.size[1]):\n\n if pixels[i, j] == 0:\n white_count = 0\n end_crop = 0\n black = True\n\n if start == 0:\n start = i\n\n if not black and not (start == 0):\n white_count += 1\n\n if end_crop == 0:\n end_crop = i\n\n if white_count > max_dist:\n return_images.append(image.crop((start-buffer, 0, end_crop+buffer, image.size[1])))\n white_count = 0\n start = 0\n end_crop = 0\n\n return return_images\n\n def remove_coins(self, image):\n with open('copper_colors', 'rb') as f:\n copper_colors = pickle.load(f)\n with open('silver_colors', 'rb') as f:\n silver_colors = pickle.load(f)\n with open('gold_colors', 'rb') as f:\n gold_colors = pickle.load(f)\n\n pixels = image.load()\n\n for x in range(image.size[0]):\n for y in range(image.size[1]):\n if (pixels[x, y] in copper_colors) or (pixels[x, y] in silver_colors) or (pixels[x, y] in gold_colors):\n pixels[x, y] = (255, 255, 255)\n\n return image\n","sub_path":"ss_processor.py","file_name":"ss_processor.py","file_ext":"py","file_size_in_byte":10751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"513518305","text":"from __future__ import print_function\nimport argparse\nimport cv2\nimport torch\n\nfrom torch.autograd import Variable\n\nfrom SqueezeMonoPredictor import SqueezeMonoPredictor\nfrom RotateDataSet import generate_rotated_image\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--model', type=str, default='/home/user/dict.pth', help='path to dataset')\nparser.add_argument('--image', type=str, default='/home/user/1.png', help='path to dataset')\nparser.add_argument('--angle', type=float, default='-2.5', help='rotation angle')\n\nopt = parser.parse_args()\n\nnetwork = SqueezeMonoPredictor(1, False)\nnetwork.load_state_dict(torch.load(opt.model))\nnetwork = network.cpu()\nnetwork.eval()\n\nimage = cv2.imread(opt.image, cv2.IMREAD_GRAYSCALE)\ndemo = generate_rotated_image(image, opt.angle, size=(image.shape[1]/2, image.shape[0]/2), crop_center=True,\n crop_largest_rect=True\n )\nrotated_image = generate_rotated_image(image, opt.angle, size=(256, 256), crop_center=True,\n crop_largest_rect=True\n )\ntensor = torch.from_numpy(rotated_image).float()\ntensor = tensor.unsqueeze(0).unsqueeze(0)\ntensor /= 255\ntensor = Variable(tensor)\n\noutput = network(tensor)\nestimated_angle = output.squeeze()\nresult = float(estimated_angle[0])\nprint( 'actual_angle = ' , opt.angle, ' estimated_angle ', float(estimated_angle[0]))\n\ncv2.imshow('actual = ' + str(opt.angle) + ' estimated = ' + str(result), 
demo)\ncv2.waitKey(0)\n","sub_path":"deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"63209786","text":"import logging\nimport unohelper\n\nfrom com.sun.star.frame import XDispatchProvider\nfrom com.sun.star.lang import XInitialization\n\n\n#logging.basicConfig(filename='/tmp/complex_toolbar.txt', level=logging.DEBUG)\n\nclass SampleHandler(unohelper.Base, XDispatchProvider, XInitialization):\n def __init__(self, ctx):\n self.ctx = ctx\n self.frame = None\n self.toolkit = ctx.getServiceManager(). \\\n createInstanceWithContext(\"com.sun.star.awt.Toolkit\", ctx)\n\n def initialize(self, objs):\n if len(objs) > 0:\n self.frame = objs[0]\n return\n\n def queryDispatch(self, url, target, searchflags):\n if url.Protocol == \"addons.ExtendingLibreOffice.ComplexToolbar.DummyProtocol:\":\n smgr = self.ctx.getServiceManager()\n dispatch = smgr.createInstanceWithArgumentsAndContext(\n \"addons.ExtendingLibreOffice.ComplexToolbar.SampleDispatch\",\n (self.frame, ), self.ctx)\n return dispatch\n return None\n\n def queryDispatches(self, requests):\n result = []\n for item in requests:\n result.append(self.queryDispatch(item.FeatureURL, item.FrameName, item.SearchFlags))\n return tuple(result)\n\n\ng_ImplementationHelper = unohelper.ImplementationHelper()\ng_ImplementationHelper.addImplementation(\n SampleHandler,\n \"addons.ExtendingLibreOffice.ComplexToolbar.SampleHandler\",\n (\"com.sun.star.frame.ProtocolHandler\",), )\n","sub_path":"src/ComplexToolbar/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"271313788","text":"import pkg_resources\nfrom helga import settings\nfrom helga.extensions.base import (HelgaExtension,\n CommandExtension)\nfrom helga.extensions.core import (ControlExtension,\n HelpExtension,\n IgnoreExtension)\nfrom helga.log import setup_logger\n\n\nlogger = setup_logger(__name__)\n\n\nclass ExtensionRegistry(object):\n\n def __init__(self, bot, load=True):\n self.bot = bot\n self.extensions = {'commands': set(), 'contexts': set()}\n self.extension_names = set()\n self.disabled_extensions = {} # Per-channel blacklist\n self.core = set()\n\n if load:\n self.load()\n\n def _make_import_args(self, path):\n return path, {}, {}, [path.split('.')[-1]]\n\n def load_module_members(self, cls):\n try:\n if issubclass(cls, HelgaExtension) and cls.NAME not in self.extension_names:\n category = 'commands' if self._is_command(cls) else 'contexts'\n self.extensions[category].add(cls(bot=self.bot))\n self.extension_names.add(cls.NAME)\n except (TypeError, AttributeError):\n # Either it's not a class, or it doesn't have ``NAME``\n logger.error('Attempted to load a non-plugin: %s' % repr(cls))\n\n def load(self):\n for module in _load_library_extensions():\n if module.NAME in settings.EXTENSIONS:\n logger.debug('Loading extension extension %s' % repr(module))\n self.load_module_members(module)\n\n # XXX Core has already loaded :/\n # Core loading\n self.core = set([\n ControlExtension(self, self.bot),\n HelpExtension(self, self.bot),\n IgnoreExtension(self.bot),\n ])\n\n def _is_command(self, ext):\n \"\"\"\n Checks if an extension is a command or not\n \"\"\"\n try:\n return issubclass(ext, CommandExtension)\n except TypeError:\n return False\n\n def _call_extension_method(self, fn, message):\n \"\"\"\n Calls a function name for all 
extensions\n \"\"\"\n # TODO: process core first\n\n # Nested for your pleasure\n def call_fn(fn, message, category):\n for ext in self.extensions[category]:\n if self.is_disabled(ext, message.channel):\n logger.debug('Skipping disabled extension %s on %s' % (ext.NAME, message.channel))\n continue\n\n getattr(ext, fn)(message)\n\n if message.has_response:\n return\n\n # Do cores first\n for ext in self.core:\n getattr(ext, fn)(message)\n if message.has_response:\n return\n\n # This is kind of crappy, but commands should go first\n call_fn(fn, message, 'commands')\n\n # The other ones\n if not message.has_response:\n call_fn(fn, message, 'contexts')\n\n def preprocess(self, message):\n \"\"\"\n Used to do any message preprocessing. i.e. transforming things\n \"\"\"\n self._call_extension_method('preprocess', message)\n\n def process(self, message):\n self._call_extension_method('process', message)\n\n def is_disabled(self, name, channel):\n \"\"\"\n Returns True or False if extension is disabled on the given channel\n \"\"\"\n # If it's an extension class/object\n if hasattr(name, 'NAME'):\n name = name.NAME\n\n return name in self.disabled_extensions.get(channel, set())\n\n def is_enabled(self, name, channel):\n return not self.is_disabled(name, channel)\n\n def disable(self, name, channel):\n \"\"\"\n Disables the use of a named extension on a given channel\n \"\"\"\n if channel not in self.disabled_extensions:\n self.disabled_extensions[channel] = set()\n\n if name not in self.extension_names:\n return False\n\n logger.info('Disabling %s on %s' % (name, channel))\n self.disabled_extensions[channel].add(name)\n\n return True\n\n def enable(self, name, channel):\n \"\"\"\n Enables the use of a named extension on a given channel\n \"\"\"\n if channel not in self.disabled_extensions:\n self.disabled_extensions[channel] = set()\n\n if name not in self.extension_names:\n return False\n\n logger.info('Enabling %s on %s' % (name, channel))\n self.disabled_extensions[channel].discard(name)\n\n return True\n\n def get_enabled(self, channel):\n \"\"\"\n Returns a set of extensions enabled on this channel\n \"\"\"\n return self.extension_names - self.get_disabled(channel)\n\n def get_disabled(self, channel):\n \"\"\"\n Returns a set of extensions disabled on this channel\n \"\"\"\n return self.disabled_extensions.get(channel, set())\n\n def get_all_extensions(self, core=False):\n extensions = self.extensions['commands'].union(self.extensions['contexts'])\n\n if core:\n extensions = extensions.union(self.core)\n\n return extensions\n\n def get_commands(self):\n return self.extensions['commands']\n\n def get_contexts(self):\n return self.extensions['contexts']\n\n def is_extension_name(self, name):\n return name in self.extension_names\n\n\n#\n# Plugin loader Helpers\n#\n\ndef _load_library_extensions():\n \"\"\"\n Locate all setuptools entry points by the name 'helga_handlers'\n and initialize them.\n Any third-party library may register an entry point by adding the\n following to their setup.py::\n\n entry_points = {\n 'helga_handlers': [\n 'plugin_name = mylib.mymodule:Handler_Class',\n ],\n },\n\n \"\"\"\n group = 'helga_handlers'\n entry_points = pkg_resources.iter_entry_points(group=group)\n plugins = []\n for ep in entry_points:\n try:\n logger.debug('loading entry_point %s' % ep.name)\n plugin = ep.load()\n plugin._helga_name_ = ep.name\n plugins.append(plugin)\n except Exception as error:\n logger.error(\"Error initializing plugin %s: %s\" % (ep, error))\n return 
plugins\n","sub_path":"helga/extensions/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"292332549","text":"from pymongo import MongoClient\nimport random\nclient = MongoClient('localhost', 27017) # when dev, using this\n\ntotalWeeks = 5\ndef setupTimeline():\n db = client.Jobs\n db.Statistic.drop()\n locations = list(db.Jobs.distinct(\"Location\", {}))\n for location in locations:\n jobs = db.Jobs.find({\"Location\": location})\n totalJobs = 0\n totalSalaries = 0\n for job in jobs:\n highestSalary = int(job[\"HighestSalary\"])\n if highestSalary == 999:\n highestSalary = 300 # to reduce the weight of thi sjob\n totalSalaries = totalSalaries + highestSalary\n totalJobs += 1\n averageJobIn1Week = totalJobs / totalWeeks\n averageSalaryIn1Week = totalSalaries / totalWeeks\n i = 1\n while i < 21:\n ran = random.uniform(0, 1)\n randomAvg = random.uniform(-0.1, 0.1)\n weight = i + ran\n totalSalaries = averageSalaryIn1Week * (weight + randomAvg)\n totalJobs = int(averageJobIn1Week * weight)\n timeline ={\n \"Location\": location,\n \"Week\": i,\n \"Year\": '2018',\n 'TotalJobs': int(totalJobs),\n 'TotalSalaries': round(totalSalaries,2),\n 'AverageSalary': round(totalSalaries / totalJobs,2)\n }\n db.Statistic.insert(timeline)\n i += 1\n \n\nsetupTimeline()","sub_path":"API/MockData/createTimeline.py","file_name":"createTimeline.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"16111009","text":"import time\r\nimport random\r\n\r\nname = input('What`s your name?')\r\nprint(f'Hello, {name}!')\r\n\r\ntime.sleep(1)\r\n\r\nquestion = input('Wanna play?(yes/no)')\r\nif question == 'yes':\r\n print('OK, let`s play!')\r\nelse:\r\n print('OK, Bye!')\r\n quit()\r\n\r\n\r\nprint('Start guessing...')\r\ntime.sleep(0.5)\r\n\r\nwords = [\"sunday\", \"secret\", \"hangman\", \"thunder\", \"python\"]\r\nsecret_word = random.choice(words)\r\n\r\n\r\ndef get_guess():\r\n\r\n dashes = \"-\" * len(secret_word)\r\n guesses_left = 10\r\n\r\n while guesses_left > -1 and not dashes == secret_word:\r\n\r\n print(dashes)\r\n print(str(guesses_left))\r\n\r\n my_guess = input(\"Guess:\")\r\n\r\n if len(my_guess) != 1:\r\n print(\"Write only one letter!\")\r\n\r\n elif my_guess in secret_word:\r\n print(\"That letter is in the secret word!\")\r\n dashes = update_dashes(secret_word, dashes, my_guess)\r\n\r\n else:\r\n print(\"That letter is not in the secret word!\")\r\n guesses_left -= 1\r\n\r\n if guesses_left < 0:\r\n print(\"You lose. The word was: \" + str(secret_word))\r\n else:\r\n print(\"Congrats! You win. 
The word was: \" + str(secret_word))\r\n\r\n\r\ndef update_dashes(secret, cur_dash, rec_guess):\r\n result = \"\"\r\n\r\n for i in range(len(secret)):\r\n if secret[i] == rec_guess:\r\n result = result + rec_guess\r\n\r\n else:\r\n result = result + cur_dash[i]\r\n\r\n return result\r\n\r\n\r\nget_guess()\r\n","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"556552157","text":"import pytest\n\nfrom domain.batch import Batch\nfrom domain.order_line import OrderLine\nfrom datetime import date\nfrom tests.common_test import get_session, clear_all\n\n\ndef test_orderline_mapper_can_load_lines(session):\n query = \"\"\"\n INSERT INTO order_lines (ref, sku, qty) VALUES\n ('order1', 'CHAIR', 12),\n ('order2', 'TABLE', 13),\n ('order3', 'LIPSTICK', 14)\n \"\"\"\n session.execute(query)\n expected = [\n OrderLine(\"order1\", \"CHAIR\", 12),\n OrderLine(\"order2\", \"TABLE\", 13),\n OrderLine(\"order3\", \"LIPSTICK\", 14),\n ]\n obtained = session.query(OrderLine).all()\n assert obtained == expected\n\ndef test_orderline_mapper_can_save_lines(session):\n new_line = OrderLine(\"order1\", \"DECORATION\", 12)\n session.add(new_line)\n session.commit()\n rows = list(session.execute(\"SELECT ref,sku,qty from 'order_lines'\"))\n assert rows == [(\"order1\", \"DECORATION\",12)]\n\ndef test_retrieving_batches(session):\n query = \"\"\"\n INSERT INTO batches(reference, sku, _purchased_quantity, eta)\n VALUES \n ('batch1', \"sku1\", 100, null),\n ('batch2', \"sku2\", 200, '2011-04-11');\n \"\"\"\n session.execute(query)\n expected = [\n Batch(\"batch1\", \"sku1\", 100, eta=None),\n Batch(\"batch2\", \"sku2\", 200, eta=date(2011,4,11)),\n ]\n obtained = session.query(Batch).all()\n assert obtained == expected\n\n\ndef test_saving_batches(session): \n batch = Batch(\"batch1\", \"sku1\", 100, eta=None)\n session.add(batch)\n session.commit()\n rows = list(session.execute(\n \"SELECT reference, sku, _purchased_quantity, eta from 'batches'\"\n ))\n assert rows == [('batch1', 'sku1', 100, None)]\n\ndef test_saving_allocations(session):\n batch = Batch(\"batch1\", \"sku1\", 100, eta=None)\n line = OrderLine(\"order1\", \"sku1\", 10)\n batch.allocate(line)\n session.add(batch)\n session.commit()\n query = \"\"\"\n SELECT ol.ref, b.reference \n FROM allocations a\n INNER JOIN order_lines ol on ol.id = a.id\n INNER JOIN batches b on b.id = a.id\n \"\"\"\n rows = list(session.execute(query))\n assert rows == [(line.ref, batch.reference)]\n\n\ndef test_retrieveing_allocations(session):\n query = \"\"\"\n INSERT INTO order_lines(ref, sku, qty)\n VALUES\n ('order1', 'sku1', 12);\n \"\"\"\n session.execute(query)\n [[olid]] = session.execute(\n 'SELECT id FROM order_lines WHERE ref=:ref AND sku=:sku',\n dict(ref='order1', sku='sku1')\n )\n query = \"\"\"\n INSERT INTO batches(reference, sku, _purchased_quantity, eta)\n VALUES\n ('batch1', 'sku1', 100, null);\n \"\"\"\n session.execute(query)\n [[bid]] = session.execute(\n 'SELECT id FROM batches WHERE reference=:ref AND sku=:sku',\n dict(ref='batch1', sku='sku1')\n )\n query = \"\"\"\n INSERT INTO allocations(orderline_id, batch_id)\n VALUES\n ({olid}, {bid})\n \"\"\".format(olid=olid, bid=bid)\n session.execute(query)\n batch = session.query(Batch).one()\n assert batch._allocations == {\n OrderLine(\"order1\", \"sku1\", 12)\n 
}\n","sub_path":"proyectos/chapter4b/tests/integration/test_orm.py","file_name":"test_orm.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"495941205","text":"import re\n\nfrom django.contrib.auth.backends import ModelBackend\n\nfrom apps.users.models import User\nfrom meiduo_mall import settings\n\n'''\n封装/ 抽取的思想\n 为什么要封装/抽取?\n 1.降低代码的耦合度 (高内聚,低耦合)\n 2.提高代码的重用性 (很多地方都用到了重复的代码)\n \n 抽取/封装的步骤:\n 1.定义一个函数(方法),把要抽取的代码复制过来\n 2.哪里有问题改哪里,没有的变量以参数的形式传递\n 3.验证抽取方法\n \n 什么时候进行抽取/封装\n 1.某几行代码实现了一个小功能,我们就可以抽取/封装\n 2.我们的代码只要第二次重复使用就要抽取/封装\n \n'''\ndef get_user_by_username(username):\n try:\n if re.match(r'1[3-9]\\d{9}', username):\n\n # ① username是手机号\n user = User.objects.get(mobile=username)\n else:\n\n # ② username是用户名\n user = User.objects.get(username=username)\n except User.DoesNotExist:\n return None\n\n return user\n\n\nclass UsernameMobileBackend(ModelBackend):\n\n def authenticate(self, request, username=None, password=None, **kwargs):\n\n # 1.先查询用户\n\n # username有可能是手机号也有可能是用户名\n # 通过对username进行正则来区分(也可以用Q对象的或来区分)\n # try:\n # if re.match(r'1[3-9]\\d{9}',username):\n #\n # # ① username是手机号\n # user = User.objects.get(mobile=username)\n # else:\n #\n # # ② username是用户名\n # user = User.objects.get(username=username)\n # except User.DoesNotExist:\n # return None\n user = get_user_by_username(username)\n\n\n\n # 2.判断用户的密码是否正确\n if user is not None and user.check_password(password):\n return user\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer\ndef active_email_url(email,user_id):\n\n # 1.创建实例\n s = Serializer(secret_key=settings.SECRET_KEY,expires_in=3600)\n # 2.组织数据\n data = {\n 'email':email,\n 'id':user_id\n }\n\n # 3.加密\n token = s.dumps(data)\n # 4.返回激活url\n\n return 'http://www.meiduo.site:8000/email_active/?token=%s'%token.decode()\n\nfrom itsdangerous import BadSignature,SignatureExpired\ndef check_email_active_token(token):\n\n # 1.创建实例\n s = Serializer(secret_key=settings.SECRET_KEY,expires_in=3600)\n # 2.解密数据(得到的是字典数据)\n # 解密数据的时候如果token过期,会报异常:SignatureExpired; 如果token被篡改会报异常:BadSignature\n try:\n result = s.loads(token)\n # except Exception as e:\n except BadSignature :\n return None\n # 3.获取数据\n email = result.get('email')\n id = result.get('id')\n # 4.返回数据\n # ① 可以将email和id返回\n # ② 可以在这里一起查询,再返回user,这个选择这种方式\n try:\n user = User.objects.get(email=email,id=id)\n except User.DoesNotExist:\n return None\n return user\n","sub_path":"meiduo_mall/apps/users/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"165915363","text":"from flask import Blueprint\nfrom datetime import datetime, timedelta\nfrom functools import partial\nfrom api.schemas import DashboardTileSchema, DashboardTileDataSchema, ObservableSchema, ActionFormParamsSchema\nfrom api.utils import jsonify_data, get_jwt, get_json\nfrom api.infocon import get_infocon, get_attack_summary, get_topports, get_topip\n\ndashboard_api = Blueprint('dashboard', __name__)\nget_dashboardtile_form_params = partial(get_json, schema=DashboardTileSchema())\nget_dashboardtiledata_form_params = partial(get_json, schema=DashboardTileDataSchema())\n\n\ndef set_valid_time():\n return {\n 'start_time': str(datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")),\n 'end_time': str((datetime.now() + timedelta(minutes=30)).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"))\n }\ndef set_observed_time(timeframe_in_sec):\n 
return {\n 'start_time': str((datetime.now() - timedelta(seconds=timeframe_in_sec)).strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")),\n 'end_time': str(datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"))\n\n }\n\ndef create_tile_data(threat_level, diary_name, url):\n valid_time = set_valid_time()\n observed_time = set_observed_time(86400)\n txt = '| **{}** | | [{}]({}) |'.format(threat_level, diary_name, url)\n data = []\n data.append('[SANS Internet Storm Center](https://isc.sans.edu/infocon.html)')\n data.append(' ')\n data.append('| Threat Level |   | Dairy |')\n data.append('| -- | -- | -- |')\n data.append(txt)\n\n data = {\n 'valid_time': valid_time,\n 'hide_legend': True,\n 'cache_scope': 'org',\n 'observed_time': observed_time,\n 'data': data\n }\n return data\n\ndef create_tile_data_reports(data_json):\n valid_time = set_valid_time()\n observed_time = set_observed_time(2592000)\n keys = [\n {\n 'key': 'reports',\n 'label': 'Reports'\n }\n ]\n data = {\n 'valid_time': valid_time,\n 'hide_legend': True,\n 'cache_scope': 'org',\n 'observed_time': observed_time,\n 'key_type': 'timestamp',\n 'keys': keys,\n 'data': data_json\n }\n return data\n\ndef create_tile_data_targets(data_json):\n valid_time = set_valid_time()\n observed_time = set_observed_time(2592000)\n keys = [\n {\n 'key': 'targets',\n 'label': 'Targets'\n }\n ]\n data = {\n 'valid_time': valid_time,\n 'hide_legend': True,\n 'cache_scope': 'org',\n 'observed_time': observed_time,\n 'key_type': 'timestamp',\n 'keys': keys,\n 'data': data_json\n }\n return data\n\ndef create_tile_data_sources(data_json):\n valid_time = set_valid_time()\n observed_time = set_observed_time(2592000)\n keys = [\n {\n 'key': 'sources',\n 'label': 'Sources'\n }\n ]\n data = {\n 'valid_time': valid_time,\n 'hide_legend': True,\n 'cache_scope': 'org',\n 'observed_time': observed_time,\n 'key_type': 'timestamp',\n 'keys': keys,\n 'data': data_json\n }\n return data\n\ndef get_tile(description, tags, tile_type, title, tile_id):\n if tile_id == 'SANS_Infocon':\n periods = 'last_24_hours'\n elif tile_id == 'SANS_TopPorts':\n periods = 'last_24_hours'\n elif tile_id == 'SANS_Reports':\n periods = 'last_30_days'\n else:\n periods = 'last_30_days'\n return {\n 'description': description,\n 'periods': [\n periods\n ],\n 'tags': tags,\n 'type': tile_type,\n 'short_description': description,\n 'title': title,\n 'default_period': periods,\n 'id': tile_id\n }\n\n\n@dashboard_api.route('/tiles', methods=['POST'])\ndef tiles():\n #get_jwt()\n data = []\n\n # Tile SANS Internet Storm Center & Diary\n title = 'SANS Internet Storm Center Threat Level and daily diary information'\n tags = ['SANS', 'Threat Level', 'Infocon']\n tile_type = 'markdown'\n description = 'SANS Internet Storm Center Infocon Threat Level & Diares'\n tile_id = 'SANS_Infocon'\n data.append(get_tile(description, tags, tile_type, title, tile_id))\n\n # Tile SANS Internet Storm Center Daily Reports\n title = 'SANS Internet Storm Center Daily Reports'\n tags = ['SANS', 'Threat Level', 'Reports']\n tile_type = 'vertical_bar_chart'\n description = 'SANS Internet Storm Center Daily Reports'\n tile_id = 'SANS_Reports'\n data.append(get_tile(description, tags, tile_type, title, tile_id))\n\n # Tile SANS Internet Storm Center Daily Summary of totals tof sources\n title = 'SANS Internet Storm Center Daily Summary of sources'\n tags = ['SANS', 'Threat Level', 'Sources']\n tile_type = 'vertical_bar_chart'\n description = 'SANS Internet Storm Center Daily Summary of sources'\n tile_id = 'SANS_Sources'\n 
data.append(get_tile(description, tags, tile_type, title, tile_id))\n\n # Tile SANS Internet Storm Center Daily Summary of totals targets\n title = 'SANS Internet Storm Center Daily Summary of targets'\n tags = ['SANS', 'Threat Level', 'Targets']\n tile_type = 'vertical_bar_chart'\n description = 'SANS Internet Storm Center Daily Summary of targets'\n tile_id = 'SANS_Targets'\n data.append(get_tile(description, tags, tile_type, title, tile_id))\n\n # Tile SANS Internet Storm Center Daily Top ports\n title = 'SANS Internet Storm Center Daily Top ports'\n tags = ['SANS', 'Threat Level', 'TopPorts']\n tile_type = 'horizontal_bar_chart'\n description = 'SANS Internet Storm Center Daily Top ports'\n tile_id = 'SANS_TopPorts'\n data.append(get_tile(description, tags, tile_type, title, tile_id))\n\n # Tile SANS Internet Storm Center Daily Top IP\n title = 'SANS Internet Storm Center Daily Top IP by attack'\n tags = ['SANS', 'Threat Level', 'TopIP']\n tile_type = 'horizontal_bar_chart'\n description = 'SANS Internet Storm Center Daily Top IP by attack '\n tile_id = 'SANS_TopIP'\n data.append(get_tile(description, tags, tile_type, title, tile_id))\n\n\n return jsonify_data(data)\n\n@dashboard_api.route('/tiles/tile', methods=['POST'])\ndef tile():\n #get_jwt()\n return jsonify_data({})\n\n@dashboard_api.route('/tiles/tile-data', methods=['POST'])\ndef tile_data():\n data = []\n #get_jwt()\n params = get_dashboardtiledata_form_params()\n if params['tile_id'] == 'SANS_Infocon':\n threat_level, diary_name, url = get_infocon()\n data = create_tile_data(threat_level, diary_name, url)\n return jsonify_data(data)\n elif params['tile_id'] == 'SANS_Reports':\n xml, start_day, end_day = get_attack_summary(30)\n data_json = []\n for x in xml.findall('daily'):\n day = {}\n date = datetime.strptime(x.find('date').text, \"%Y-%m-%d\")\n day['key'] = datetime.timestamp(date) * 1000\n day['label'] = '{}, 00:00:00'.format(\n datetime.strptime(x.find('date').text, \"%Y-%m-%d\").strftime('%m/%d/%Y'))\n day['value'] = int(x.find('records').text) + int(x.find('sources').text) + int(x.find('targets').text)\n values = [\n {\n 'key': 'reports',\n 'value': int(x.find('records').text),\n 'tooltip': 'Reports: {}'.format(int(x.find('records').text)),\n 'link_uri': 'https://isc.sans.edu/submissions.html?startdate={}&enddate={}&yname=sources&y2name=targets&submit=Update'.format(start_day, end_day)\n }\n ]\n day['values'] = values\n data_json.append(day)\n data = create_tile_data_reports(data_json)\n return jsonify_data(data)\n\n elif params['tile_id'] == 'SANS_Targets':\n xml, start_day, end_day = get_attack_summary(30)\n data_json = []\n for x in xml.findall('daily'):\n day = {}\n date = datetime.strptime(x.find('date').text, \"%Y-%m-%d\")\n day['key'] = datetime.timestamp(date) * 1000\n day['label'] = '{}, 00:00:00'.format(\n datetime.strptime(x.find('date').text, \"%Y-%m-%d\").strftime('%m/%d/%Y'))\n day['value'] = int(x.find('records').text) + int(x.find('sources').text) + int(x.find('targets').text)\n values = [\n {\n 'key': 'targets',\n 'value': int(x.find('targets').text),\n 'tooltip': 'Targets: {}'.format(int(x.find('targets').text)),\n 'link_uri': 'https://isc.sans.edu/submissions.html?startdate={}&enddate={}&yname=sources&y2name=targets&submit=Update'.format(start_day, end_day)\n }\n ]\n day['values'] = values\n data_json.append(day)\n data = create_tile_data_targets(data_json)\n return jsonify_data(data)\n\n elif params['tile_id'] == 'SANS_Sources':\n xml, start_day, end_day = get_attack_summary(30)\n data_json = []\n 
for x in xml.findall('daily'):\n day = {}\n date = datetime.strptime(x.find('date').text, \"%Y-%m-%d\")\n day['key'] = datetime.timestamp(date) * 1000\n day['label'] = '{}, 00:00:00'.format(\n datetime.strptime(x.find('date').text, \"%Y-%m-%d\").strftime('%m/%d/%Y'))\n day['value'] = int(x.find('records').text) + int(x.find('sources').text) + int(x.find('targets').text)\n values = [\n {\n 'key': 'sources',\n 'value': int(x.find('sources').text),\n 'tooltip': 'Sources: {}'.format(int(x.find('sources').text)),\n 'link_uri': 'https://isc.sans.edu/submissions.html?startdate={}&enddate={}&yname=sources&y2name=targets&submit=Update'.format(start_day, end_day)\n }\n ]\n day['values'] = values\n data_json.append(day)\n data = create_tile_data_sources(data_json)\n return jsonify_data(data)\n\n elif params['tile_id'] == 'SANS_TopPorts':\n json, today = get_topports()\n keys = []\n data = []\n for x in range(0, 10):\n a = {}\n a['key'] = 'Port-{}'.format(str(json[str(x)]['targetport']))\n a['label'] = 'Port-{}'.format(str(json[str(x)]['targetport']))\n keys.append(a)\n b = {}\n c = {}\n values = []\n b['key'] = 'Port-{}'.format(str(json[str(x)]['targetport']))\n b['label'] = 'Port-{}'.format(str(json[str(x)]['targetport']))\n b['value'] = json[str(x)]['records']\n c['key'] = 'Port-{}'.format(str(json[str(x)]['targetport']))\n c['tooltip'] = 'Port-{}'.format(str(json[str(x)]['targetport']))\n c['link_uri'] = 'https://isc.sans.edu/port.html?port={}'.format(str(json[str(x)]['targetport']))\n c['value'] = json[str(x)]['records']\n values.append(c)\n b['values'] = values\n data.append(b)\n valid_time = set_valid_time()\n observed_time = set_observed_time(86400)\n response = {\n 'valid_time': valid_time,\n 'keys': keys,\n 'cache_scope': 'org',\n 'hide_legend': True,\n 'observed_time': observed_time,\n 'key_type': 'string',\n \"observable_type\": False,\n 'data': data\n }\n return jsonify_data(response)\n\n elif params['tile_id'] == 'SANS_TopIP':\n json, today = get_topip()\n keys = []\n data = []\n for x in json:\n a = {}\n a['key'] = x['ip']\n a['label'] = x['ip']\n keys.append(a)\n b = {}\n c = {}\n values = []\n b['key'] = x['ip']\n b['label'] = x['ip']\n b['value'] = x['attacks']\n c['key'] = x['ip']\n c['tooltip'] = x['ip']\n c['link_uri'] = 'https://isc.sans.edu/ipinfo.html?ip={}'.format(x['ip'])\n c['value'] = x['attacks']\n values.append(c)\n b['values'] = values\n data.append(b)\n valid_time = set_valid_time()\n observed_time = set_observed_time(86400)\n response = {\n 'valid_time': valid_time,\n 'keys': keys,\n 'cache_scope': 'org',\n 'hide_legend': True,\n 'observed_time': observed_time,\n 'key_type': 'string',\n \"observable_type\": 'ip',\n 'data': data\n }\n return jsonify_data(response)\n\n\n else:\n return jsonify_data(data)\n\n","sub_path":"code/api/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":12656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"238107760","text":"from __future__ import print_function\n\nimport os\nimport setuptools\nimport shutil\nimport subprocess\nimport sys\n\n# Convert README.md to reStructuredText.\nif {'bdist_wheel', 'sdist'}.intersection(sys.argv):\n try:\n import pypandoc\n except ImportError:\n print('WARNING: You should install `pypandoc` to convert `README.md` '\n 'to reStructuredText to use as long description.',\n file=sys.stderr)\n else:\n print('Converting `README.md` to reStructuredText to use as long '\n 'description.')\n long_description = 
pypandoc.convert('README.md', 'rst')\n\n# Clean and install bower components.\nif {'bdist_wheel', 'develop', 'sdist'}.intersection(sys.argv):\n    cwd = os.getcwd()\n    os.chdir('icekit_events/static/icekit_events')\n    if os.path.exists('bower_components'):\n        print('Cleaning bower components.')\n        shutil.rmtree('bower_components')\n    print('Installing bower components.')\n    try:\n        if subprocess.call(['bower', 'install', '--allow-root'], stderr=sys.stderr):\n            raise RuntimeError\n    except (OSError, RuntimeError):\n        print('ERROR: Unable to install bower components.', file=sys.stderr)\n        if {'bdist_wheel', 'sdist'}.intersection(sys.argv):\n            exit(1)\n    os.chdir(cwd)\n\nsetuptools.setup(\n    name='icekit-events',\n    use_scm_version={'version_scheme': 'post-release'},\n    author='Interaction Consortium',\n    author_email='studio@interaction.net.au',\n    url='https://github.com/ic-labs/icekit-events',\n    description='',\n    long_description=locals().get('long_description', ''),\n    license='MIT',\n    packages=setuptools.find_packages(),\n    include_package_data=True,\n    install_requires=[\n        'Django<1.9',\n        'django-icekit',\n        'django-polymorphic',\n        'django-polymorphic-tree',\n        'django-timezone',\n        'mkdocs',\n        'python-dateutil',\n        'pytz',\n        'six',\n        'sqlparse',  # Required for SQL migrations, apparently\n        'django-colorful',\n    ],\n    extras_require={\n        'dev': ['ipdb', 'ipython'],\n        'fluentevent': ['django-fluent-contents'],\n        'postgres': ['psycopg2'],\n        'test': [\n            'coverage',\n            'django-dynamic-fixture',\n            'django-nose',\n            'django-webtest',\n            'nose-progressive',\n            'WebTest',\n        ]\n    },\n    setup_requires=['setuptools_scm'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"328619182","text":"\"\"\"\nMicroPython water indication with RGB NeoPixel Example\nhttps://github.com/STEMinds/micropython-eduponics\nMIT License\nCopyright (c) 2021 STEMinds\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport machine, neopixel\nimport time\n\n# define water level sensor as INPUT on IO pin number 21\nwater_level = machine.Pin(21, machine.Pin.IN)\n# Configure the RGB LED at IO pin 14 (1 indicates 1 LED)\nnp = neopixel.NeoPixel(machine.Pin(14), 1)\n\ndef is_empty():\n    # will return 0 if the container has no water and 1 if it has water\n    return water_level.value()\n\ntry:\n    while True:\n        if(is_empty()):\n            print(\"[-] You don't have water in the container\")\n            np[0] = (255, 0, 0) # set to red, full brightness\n        else:\n            print(\"[-] The water container has sufficient amount of water\")\n            np[0] = (0, 255, 0) # set to green, full brightness\n        np.write() # save changes\n        # wait one second before checking again\n        time.sleep(1)\nexcept KeyboardInterrupt:\n    # keyboard interrupt, let's turn off LED\n    np[0] = (0, 0, 0)\n    np.write()\n","sub_path":"examples/water_indicator_RGB.py","file_name":"water_indicator_RGB.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"554995584","text":"import os\n\nfrom flask import Flask\nfrom flask import make_response\nfrom flask import jsonify\n\napp = Flask(__name__)\n\napp.config.update(\n    {\n        'JSONIFY_PRETTYPRINT_REGULAR' : True,\n    }\n)\n\n@app.route('/')\ndef hello_world():\n    location = os.environ.get('LOCATION', 'nowhere')\n\n    response = {\n        'Greeting' : 'Hello!',\n        'Location' : location,\n    }\n    return make_response(jsonify(response), 200)\n","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"583746788","text":"#!/usr/bin/env python\n\n\ns = \"哈哈\"\ns_encode = s.encode(\"utf-8\")\nprint(s_encode)\n\ns_decode = s_encode.decode(\"utf-8\").encode(\"gbk\")\n\nprint(s_decode)\na = [\"Wei0\",\"wei1\",\"wei2\",\"wei3\"]\na[1] = \"tttt\"\nprint(a)\n\nb = '陈威'\nb = b.encode(encoding='utf-8')\nprint(type(b))","sub_path":"Modular_one/第二节/字符编码的转换.py","file_name":"字符编码的转换.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"590137943","text":"from __future__ import unicode_literals\n\nfrom googleapiclient import discovery\n\nurl = 'https://www.youtube.com/watch?v=M7FIvfx5J10'\nvideo_id = url.split('=')[1]\n\n\"\"\"Function that fetches the basic information about a video:\n    likes, dislikes, views, languages, duration, id, description, preview, number of comments, channel, time, tags.\n    The data is stored in a dictionary under the corresponding keys\"\"\"\n\n\ndef video_information(videoid):\n    api_key = 'AIzaSyC4BEjvtzErw6kbErLw8x2bhikb1DM2F1w'\n    youtube = discovery.build('youtube', 'v3', developerKey=api_key)\n    data = youtube.videos().list(part='snippet,contentDetails,statistics', id=videoid).execute()\n    sub = youtube.captions().list(part='snippet', videoId=videoid).execute()\n    keys = 'id', 'title', 'description', 'preview', 'channelTitle', 'likes', 'dislikes', 'comments', 'views', 'duration', 'language', 'tag', 'time','subs'\n    for item in data.get('items'):\n        id = item.get('id')\n        title = item.get('snippet').get('title')\n        channelTitle = item.get('snippet').get('channelTitle')\n        description = item.get('snippet').get('description')\n        preview = item.get('snippet').get('thumbnails').get('high').get('url')\n        likes = item.get('statistics').get('likeCount')\n        dislikes = item.get('statistics').get('dislikeCount')\n        comments = item.get('statistics').get('commentCount')\n        views = item.get('statistics').get('viewCount')\n        duration = item.get(\"contentDetails\").get('duration')\n        language = item.get('snippet').get('language')\n        tags = item.get('snippet').get('tags')\n        time = item.get('snippet').get('publishedAt')\n        subs = [item.get('snippet').get('language') for item in sub.get('items')]\n\n        values = id, title, description, preview, channelTitle, likes, dislikes, comments, views, duration, language, tags, time,subs\n\n        video_item = dict(zip(keys, values))\n        return (video_item)\n\n\n\"\"\" Function that fetches the comments (for now it is not entirely clear by what principle)\"\"\"\n\n\ndef get_comments(video_id):\n    api_key = 'AIzaSyC4BEjvtzErw6kbErLw8x2bhikb1DM2F1w'\n    youtube = discovery.build('youtube', 'v3', developerKey=api_key)\n    results = youtube.commentThreads().list(part=\"snippet\", videoId=video_id, maxResults=100).execute()\n    # keys = 'author', 'comment', 'likes'\n\n    for item in results[\"items\"]:\n        comment = item.get(\"snippet\").get(\"topLevelComment\")\n        author = comment.get(\"snippet\").get(\"authorDisplayName\")\n        text = comment.get(\"snippet\").get(\"textDisplay\")\n        likes = comment.get(\"snippet\").get(\"likeCount\")\n        print((author, text, likes))\n    # values = author, text, likes\n    return results['items']\n\n\n\"\"\"Function that prints the first number trending videos and information about them\"\"\"\n\n\ndef popular(youtube, number):\n    data = youtube.videos().list(part='snippet,contentDetails,statistics', chart='mostPopular', regionCode='RU',\n                                 maxResults=number).execute()\n    keys = 'id', 'title', 'description', 'preview', 'channelTitle', 'likes', 'dislikes', 'comments', 'views', 'duration', 'language', 'tag', 'time'\n    for item in data.get('items'):\n        id = item.get('id')\n        title = item.get('snippet').get('title')\n        channelTitle = item.get('snippet').get('channelTitle')\n        description = item.get('snippet').get('description')\n        preview = item.get('snippet').get('thumbnails').get('high').get('url')\n        likes = item.get('statistics').get('likeCount')\n        dislikes = item.get('statistics').get('dislikeCount')\n        comments = item.get('statistics').get('commentCount')\n        views = item.get('statistics').get('viewCount')\n        duration = item.get(\"contentDetails\").get('duration')\n        language = item.get('snippet').get('language')\n        tags = item.get('snippet').get('tags')\n        time = item.get('snippet').get('publishedAt')\n        values = id, title, description, preview, channelTitle, likes, dislikes, comments, views, duration, language, tags, time\n        video_item = dict(zip(keys, values))\n        print(video_item)\n    return 
data['items']\n\n\n\napi_key = 'AIzaSyC4BEjvtzErw6kbErLw8x2bhikb1DM2F1w'\nyoutube = discovery.build('youtube', 'v3', developerKey=api_key)\nprint(video_information(video_id))\n","sub_path":"youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":4579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"452962115","text":"# codding = utf8\n\nimport datetime\nimport shutil\nimport xml\nimport time\n\nimport xlwt\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\nfrom django.db.models import Sum, Max, F, Q\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse, FileResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.db.models import Q\n\nfrom django.core.paginator import Paginator\n\nimport xlrd\nimport requests, re\nfrom .models import *\nfrom django.conf import settings\nimport os, json\nfrom django.core.paginator import *\nfrom bs4 import BeautifulSoup\nfrom bs4 import element\nimport os\nfrom django.views.decorators.cache import cache_page\nfrom django.core.cache import cache\nfrom bs4 import BeautifulSoup\nfrom bs4 import element\nfrom concurrent.futures import ThreadPoolExecutor\n\n\n# 视图接受Web请求并且返回Web响应\n# 视图就是一个python函数,被定义在views.py中\n\n@login_required\ndef index(request):\n return render(request, 'ProjectName/projectName.html')\n\n\n@login_required\ndef projectName(request):\n return render(request, 'ProjectName/projectName.html')\n\n\n@login_required\ndef addProjectNamepage(request):\n return render(request, 'ProjectName/addProjectName.html')\n\n\n\n\n@login_required\n@csrf_exempt\ndef getProjectNamelist(request):\n projectnameobj = ProjectNameManage.objects.all()\n\n # total = shipManage.objects.filter(systemStatus__contains=\"使用中\").count()\n total = 0\n resultdict = {}\n list1 = []\n for project in projectnameobj:\n # print(\"--\")\n total = total + 1\n dict = {}\n dict['projectName'] = project.projectName\n dict['projectDesc'] = project.projectDesc\n dict['businessPeople'] = project.businessPeople\n dict['businessPhone'] = project.businessPhone\n dict['developPeople'] = project.developPeople\n dict['developPhone'] = project.developPhone\n dict['developmentCompany'] = project.developmentCompany\n dict['common'] = project.common\n dict['createTime'] = project.createTime\n dict['updateTime'] = project.updateTime\n dict['createPeople'] = project.createPeople\n\n list1.append(dict)\n\n\n try:\n # list1.sort(key=lambda k: (k.get('CreateTime')), reverse=False)\n list1.sort(key=lambda k: (k.get('companyName')), reverse=False)\n except TypeError as e:\n pass\n\n # 分页,?page=3&limit=20\n page = request.GET.get('page')\n limit = request.GET.get('limit')\n pageInator = Paginator(list1, limit)\n list1 = pageInator.page(page)\n # print(page, list1)\n\n res = [] # 最终返回的结果集合\n for contact in list1:\n res.append(contact)\n resultdict = {\"code\": 0, \"msg\": \"成功\", \"count\": total, \"data\": res}\n\n return JsonResponse(resultdict, safe=False)\n\n\n\n@login_required\n@csrf_exempt\ndef uploadProjectNamefile(request):\n # print(request.FILES)\n if request.method == 'POST':\n textFile = request.FILES['file']\n # filepath = os.path.join(settings.UPLOAD_DIR, textFile.name)\n filepath = os.path.join(settings.UPLOAD_DIR, \"projectNameManage/projectNameAsset.xls\")\n # print(filepath)\n dirpath = os.path.join(settings.UPLOAD_DIR, \"projectNameManage/bak/\")\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n if 
os.path.exists(filepath):\n new_filepath = os.path.join(settings.UPLOAD_DIR,\n \"projectNameManage/bak/projectNameAsset%s\" % time.strftime('%Y_%b_%d-%H_%M_%S') + \".xls\")\n\n # print(filepath)\n # print(new_filepath)\n shutil.move(filepath, new_filepath)\n # os.rename(filepath, new_filepath)\n # print(\"ok\")\n\n with open(filepath, 'wb') as f:\n for text in textFile.chunks(): # 分包写入\n f.write(text)\n\n resultdict = {\"code\": 0}\n # resultdict = ''\n print(resultdict)\n return JsonResponse(resultdict, safe=False)\n\n\n\n\n\n@login_required\n@csrf_exempt\ndef initProjectName(request):\n try:\n\n ProjectNameManage.objects.all().delete()\n\n path = 'static/upload/projectNameManage/projectNameAsset.xls'\n workbook = xlrd.open_workbook(path) # 打开execl\n\n # 输出Excel文件中所有sheet的名字\n # print(workbook.sheet_names())\n # 根据sheet索引或者名称获取sheet内容\n shipSheet = workbook.sheets()[0] # 通过索引获取\n # vulnSheet = workbook.sheet_by_index(0) # 通过索引获取\n # vulnSheet = workbook.sheet_by_name('ECS') # 通过名称获取\n # print(vulnSheet.name) # 获取sheet名称\n rowNum = shipSheet.nrows # sheet行数\n # colNum = shipSheet.ncols # sheet列数\n\n firstLineValue = ['项目名称', '项目描述', '业务联系人', '业务联系方式', '开发联系人', '开发联系方式', '开发单位', '备注', '创建时间', '修改时间', '创建人']\n\n print(firstLineValue)\n print(shipSheet.row_values(0))\n\n\n if shipSheet.row_values(0) != firstLineValue:\n result = \"文件列名不正确,请参考模板文件\"\n print(\"[ log ] -> \", result)\n return JsonResponse(result, safe=False)\n\n # # 获取所有单元格的内容\n # list = []\n # for i in range(rowNum):\n # rowlist = []\n # for j in range(colNum):\n # rowlist.append(Data_sheet.cell_value(i, j))\n # print(type(date_value), date_value)\n # list.append(rowlist)\n\n # 输出所有单元格的内容\n for row in range(rowNum):\n if row == 0:\n continue\n projectName = str((shipSheet.cell_value(row, 0)))\n projectDesc = str((shipSheet.cell_value(row, 1)))\n businessPeople = str((shipSheet.cell_value(row, 2)))\n businessPhone = str((shipSheet.cell_value(row, 3))).replace(\".0\", \"\")\n developPeople = str((shipSheet.cell_value(row, 4)))\n developPhone = str((shipSheet.cell_value(row, 5))).replace(\".0\", \"\")\n developmentCompany = str((shipSheet.cell_value(row, 6)))\n common = str((shipSheet.cell_value(row, 7)))\n createTime = str((shipSheet.cell_value(row, 8)))\n updateTime = str((shipSheet.cell_value(row, 9)))\n createPeople = str((shipSheet.cell_value(row, 10)))\n\n # for j in range(colNum):\n # # # 获取单元格内容为日期的数据\n # # date_value = xlrd.xldate_as_tuple(Data_sheet.cell_value(i, j), workbook.datemode)\n # # print(date_value)\n # print(ecsSheet.cell_value(row, j))\n # # print(list[i][j], '\\t\\t', end=\"\")\n #\n # ecsIPS = re.compile(r'((\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\.){3}(1\\d\\d|2[0-4]\\d|25[0-5]|[1-9]\\d|\\d)')\n\n projectNameobj = ProjectNameManage()\n projectNameobj.projectName = projectName\n projectNameobj.projectDesc = projectDesc\n projectNameobj.businessPeople = businessPeople\n projectNameobj.businessPhone = businessPhone\n projectNameobj.developPeople = developPeople\n projectNameobj.developPhone = developPhone\n projectNameobj.developmentCompany = developmentCompany\n projectNameobj.common = common\n projectNameobj.createTime = createTime\n projectNameobj.updateTime = updateTime\n projectNameobj.createPeople = createPeople\n projectNameobj.save()\n\n print(\"[ log ] -> 初始化成功\")\n result = \"success\"\n except xlrd.biffh.XLRDError as e:\n print(\"[ Exception ] -> \", str(e))\n result = \"文件损坏,请重新创建文件\"\n return JsonResponse(result, safe=False)\n\n except Exception as e:\n print(\"[ Exception ] -> \", str(e))\n 
result = \"创建失败\"\n\n return JsonResponse(result, safe=False)\n\n\n\n@login_required\n@csrf_exempt\ndef addProjectNamesubmit(request):\n try:\n # print(request.POST)\n # print(dict(request.POST).keys())\n\n if request.POST.get('projectName') == \"\":\n result = \"项目名禁止为空\"\n print(\"[ log ] -> \", result)\n\n return JsonResponse(result, safe=False)\n\n if ProjectNameManage.objects.filter(projectName__iexact=request.POST.get('projectName').replace(\" \", \"\")):\n result = \"该项目名已存在\"\n print(\"[ log ] -> \", result)\n return JsonResponse(result, safe=False)\n\n projectNameobj = ProjectNameManage()\n projectNameobj.projectName = request.POST.get('projectName')\n projectNameobj.projectDesc = request.POST.get('projectDesc')\n projectNameobj.businessPeople = request.POST.get('businessPeople')\n projectNameobj.businessPhone = request.POST.get('businessPhone')\n projectNameobj.developPeople = request.POST.get('developPeople')\n projectNameobj.developPhone = request.POST.get('developPhone')\n projectNameobj.developmentCompany = request.POST.get('developmentCompany')\n projectNameobj.common = request.POST.get('common')\n projectNameobj.createTime = datetime.datetime.now()\n projectNameobj.updateTime = datetime.datetime.now()\n projectNameobj.createPeople = request.user\n\n projectNameobj.save()\n\n result = \"success\"\n except Exception as e:\n print(str(e))\n result = \"failed\"\n\n return JsonResponse(result, safe=False)\n\n\n\n@login_required\n@csrf_exempt\ndef editProjectNamesubmit(request):\n try:\n # print(request.POST.dict())\n projectName = request.POST.get('projectName')\n projectDesc = request.POST.get('projectDesc')\n\n obj1 = ProjectNameManage.objects.filter(projectName=projectName, projectDesc=projectDesc)\n if obj1:\n obj1.update(\n projectName=request.POST.get('projectName'),\n projectDesc=request.POST.get('projectDesc'),\n businessPeople=request.POST.get('businessPeople'),\n businessPhone=request.POST.get('businessPhone'),\n developPeople=request.POST.get('developPeople'),\n developPhone=request.POST.get('developPhone'),\n developmentCompany=request.POST.get('developmentCompany'),\n common=request.POST.get('common'),\n updateTime=datetime.datetime.now()\n )\n result = \"success\"\n\n else:\n result = \"该资产不存在,无法修改
提示 :禁止对系统名进行编辑\"\n except Exception as e:\n print(str(e))\n result = \"failed\"\n\n return JsonResponse(result, safe=False)\n\n\n@login_required\n@csrf_exempt\ndef deleteProjectNameall(request):\n try:\n ProjectNameManage.objects.all().delete()\n result = \"success\"\n except:\n result = \"failed\"\n return JsonResponse(result, safe=False)\n\n\n\n@login_required\n@csrf_exempt\ndef searchProjectName(request):\n # print(request.GET)\n\n projectName = request.GET.get('projectName', None).strip(\" \")\n projectDesc = request.GET.get('projectDesc', None).strip(\" \")\n businessPeople = request.GET.get('businessPeople', None).strip(\" \")\n developPeople = request.GET.get('developPeople', None).strip(\" \")\n\n # 方法一:\n projectNameobj = ProjectNameManage.objects.all()\n\n if projectName:\n projectNameobj = projectNameobj.filter(projectName__icontains=projectName)\n if projectDesc:\n projectNameobj = projectNameobj.filter(projectDesc__icontains=projectDesc)\n if businessPeople:\n projectNameobj = projectNameobj.filter(businessPeople__icontains=businessPeople)\n if developPeople:\n projectNameobj = projectNameobj.filter(developPeople__icontains=developPeople)\n\n\n # 方法二:\n # filter = {}\n # if mobile:\n # filter['searchHost'] = searchHost\n # if card:\n # filter['searchUrl'] = searchUrl\n # if status:\n # filter['get_state'] = get_state\n # requestobj.objects.filter(**filter)\n\n\n\n total = projectNameobj.count()\n print(\"total:\", total)\n list1 = []\n for ship in projectNameobj:\n dict = {}\n\n dict['projectName'] = ship.projectName\n dict['projectDesc'] = ship.projectDesc\n dict['businessPeople'] = ship.businessPeople\n dict['businessPhone'] = ship.businessPhone\n dict['developPeople'] = ship.developPeople\n dict['developPhone'] = ship.developPhone\n dict['developmentCompany'] = ship.developmentCompany\n dict['common'] = ship.common\n\n list1.append(dict)\n\n try:\n # list1.sort(key=lambda k: (k.get('serviceName')), reverse=False)\n list1.sort(key=lambda k: (k.get('projectName')), reverse=False)\n except TypeError as e:\n pass\n # print(list1)\n\n # 分页,?page=3&limit=20\n page = request.GET.get('page')\n limit = request.GET.get('limit')\n pageInator = Paginator(list1, limit)\n list1 = pageInator.page(page)\n\n # print(list1)\n res = [] # 最终返回的结果集合\n for contact in list1:\n res.append(contact)\n resultdict = {\"code\": 0, \"msg\": \"成功\", \"count\": total, \"data\": res}\n\n return JsonResponse(resultdict, safe=False)\n\n\n@login_required\n@csrf_exempt\ndef deleteProjectNameforline(request):\n try:\n if request.POST:\n delete = request.POST.get('delete')\n projectName = request.POST.get('projectName')\n projectDesc = request.POST.get('projectDesc')\n\n if delete == \"yes\":\n ProjectNameManage.objects.filter(projectName__iexact=projectName, projectDesc__iexact=projectDesc).delete()\n result = \"success\"\n print(result)\n else:\n result = \"failed\"\n return JsonResponse(result, safe=False)\n\n except Exception as e:\n return JsonResponse(str(e), safe=False)\n\n\n@login_required\n@csrf_exempt\ndef downloadProjectNameTemplateFile(request):\n file = open('static/upload/projectNameManage/projectNameTemplate.xls', 'rb')\n response = FileResponse(file)\n response['Content-Type'] = '.xls,application/vnd.ms-excel'\n response['Content-Disposition'] = 'attachment;filename=\"projectNameTemplate_%s.xls\"' % time.strftime('%Y_%b_%d-%H_%M_%S')\n return response\n\n\n@login_required\n@csrf_exempt\ndef downloadProjectNameAllToFile(request):\n\n filepath = os.path.join(settings.UPLOAD_DIR, 
\"projectNameManage/projectNameAssetAll.xls\")\n # print(filepath)\n\n dirpath = os.path.join(settings.UPLOAD_DIR, \"projectNameManage/bak/\")\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n if os.path.exists(filepath):\n new_filepath = dirpath + \"projectNameAssetAll_%s\" % time.strftime('%Y_%b_%d-%H_%M_%S') + \".xls\"\n # print(filepath)\n # print(new_filepath)\n shutil.move(filepath, new_filepath)\n # os.rename(filepath, new_filepath)\n # print(\"ok\")\n\n projectNameobj = ProjectNameManage.objects.all()\n\n\n # 创建一个workbook 设置编码\n workbook = xlwt.Workbook(encoding='utf-8')\n # 创建一个worksheet\n projectSheet = workbook.add_sheet('projectAll')\n\n #生成第一行\n row0 = ['项目名称', '项目描述', '业务联系人', '业务联系方式', '开发联系人', '开发联系方式', '开发单位', '备注', '创建时间', '修改时间', '创建人']\n\n for i in range(0, len(row0)):\n projectSheet.write(0, i, row0[i])\n\n rownum = 0\n\n for project in projectNameobj:\n # if ship.systemStatus == \"已下线\":\n # continue\n rownum = rownum + 1\n projectSheet.write(rownum, 0, project.projectName)\n projectSheet.write(rownum, 1, project.projectDesc)\n projectSheet.write(rownum, 2, project.businessPeople)\n projectSheet.write(rownum, 3, project.businessPhone)\n projectSheet.write(rownum, 4, project.developPeople)\n projectSheet.write(rownum, 5, project.developPhone)\n projectSheet.write(rownum, 6, project.developmentCompany)\n projectSheet.write(rownum, 7, project.common)\n projectSheet.write(rownum, 8, str(project.createTime))\n projectSheet.write(rownum, 9, str(project.updateTime))\n projectSheet.write(rownum, 10, project.createPeople)\n\n # print(rownum)\n\n workbook.save(filepath)\n\n\n file = open(filepath, 'rb')\n\n\n response = FileResponse(file)\n response['Content-Type'] = '.xls,application/vnd.ms-excel'\n response['Content-Disposition'] = 'attachment;filename=\"projectNameAssetAll_%s.xls\"' % time.strftime('%Y_%b_%d-%H_%M_%S')\n return response\n\n\n\n\n","sub_path":"ProjectName/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"254024322","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # RW Data Script: ocn_014_index_of_coastal_protection_by_coral_reefs \n# [Metadata](https://docs.google.com/document/d/1IHZYUIh25JGZtx2k1ItTYvPA435PzIvoqCxzXjoEPzc/edit) \n# [Info](http://maps.oceanwealth.org/) \n# ~Source~ \n# \n# Author: Peter Kerins \n# Date: 2020 Nov 11 \n\n# ### Import\n\nimport os\nimport sys\nimport dotenv\ndotenv.load_dotenv(os.path.abspath(os.getenv('RW_ENV')))\nutils_path = os.path.join(os.path.abspath(os.getenv('PROCESSING_DIR')),'utils')\nif utils_path not in sys.path:\n sys.path.append(utils_path)\nimport util_files\nimport util_cloud\n\nimport subprocess\nimport urllib\nfrom zipfile import ZipFile\nimport ee\nfrom google.cloud import storage\nimport logging\nfrom pprint import pprint\nfrom collections import OrderedDict \n\n# Get the top-level logger object\nlogger = logging.getLogger()\nfor handler in logger.handlers: logger.removeHandler(handler)\nlogger.setLevel(logging.DEBUG)\n# make it print to the console.\nconsole = logging.StreamHandler()\nlogger.addHandler(console)\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n# name of asset on GEE where you want to upload data\n# this should be an asset name that is not currently in use\ndataset_name = 'ocn_014_index_of_coastal_protection_by_coral_reefs'\nlogger.info('Executing script for dataset: ' + dataset_name)\n\n# set working directory 
for processing dataset, and creates needed directories as necessary\ndata_dir = util_files.prep_dirs(dataset_name)\nlogger.debug('Data directory relative path: '+data_dir)\nlogger.debug('Data directory absolute path: '+os.path.abspath(data_dir))\n\n'''\nDownload data and save to your data directory\n'''\n# IMPORTANT: data downloaded manually\n# zipped geodatabase provided by dataset steward at tnc:\n# MOW_Coral_Reef_ES_Data.gdb.zip\n# this was downloaded and subsequently manipulated within qgis\n\n'''\nProcess data\n'''\n# IMPORTANT: initial processing was executed manually\n# qgis was used to convert vector gdb layer into shapefile: \n# MOW_Coral_Reef_ES_Data MOW_Global_Coral_Protection_dis\n# the resulting shapefile was placed in the data folder for further processing\n\n# convert from vector grid to proper raster\nvector_names = [\n    'mow_coastal-protection-index',\n    ]\n\n# rasterize data\n# gdal bytes are unsigned so nodata value is 0\nvector_path = os.path.join(data_dir,vector_names[0]+'.shp')\nprocessed_data_file = os.path.join(data_dir,dataset_name+'.tif')\n\ncmd = 'gdal_rasterize -l {} -a GRIDCODE -tr 500.0 500.0 -a_nodata 0 -te -20037125.59408577 -3768750.9896163875 20037391.091492712 3831510.076516077 -ot Byte -of GTiff {} {}'.format(vector_names[0],vector_path,processed_data_file)\n# cmd is a single shell command string, so it must be run through the shell\ncompleted_process = subprocess.run(cmd, shell=True)\nlogger.debug(str(completed_process))\n\n# create dictionary for tracking info about individual variable datasets and\n# their representation on google earth engine\ndata_dict = OrderedDict()\ndata_dict['coastal-protection'] = {\n        'url': None,\n        'missing_data': [\n            0,\n        ],\n        'pyramiding_policy': 'MEAN',\n        'raw_data_file': processed_data_file,\n    }\n\n'''\nUpload processed data to Google Earth Engine\n'''\n# set up Google Cloud Storage project and bucket objects\ngcs_client = storage.Client(os.environ.get(\"CLOUDSDK_CORE_PROJECT\"))\ngcs_bucket = gcs_client.bucket(os.environ.get(\"GEE_STAGING_BUCKET\"))\n\n# initialize ee (Google Earth Engine Python API) for uploading to GEE\nauth = ee.ServiceAccountCredentials(os.getenv('GEE_SERVICE_ACCOUNT'), os.getenv('GOOGLE_APPLICATION_CREDENTIALS'))\nee.Initialize(auth)\n\nlogger.info('Uploading processed data to Google Cloud Storage.')\ngcs_uris = util_cloud.gcs_upload(processed_data_file, dataset_name, gcs_bucket=gcs_bucket)\n\nlogger.info('Uploading processed data to Google Earth Engine.')\n# generate bands component of GEE upload manifest\nmf_bands = util_cloud.gee_manifest_bands(data_dict, dataset_name)\n# upload processed data file to GEE\nasset_name = f'projects/resource-watch-gee/{dataset_name}'\n\nmanifest = util_cloud.gee_manifest_complete(asset_name, gcs_uris[0], mf_bands)\nlogger.debug(manifest)\ntask_id = util_cloud.gee_ingest(manifest, public=True)\n\nutil_cloud.gcs_remove(gcs_uris, gcs_bucket=gcs_bucket)\nlogger.info('Files deleted from Google Cloud Storage.')\n\n'''\nUpload original data and processed data to Amazon S3 storage\n'''\n# amazon storage info\naws_bucket = 'wri-projects'\ns3_prefix = 'resourcewatch/raster/'\n\nlogger.info('Uploading original data to S3.')\n# copy the raw data into a zipped file to upload to S3\nraw_data_dir = os.path.join(data_dir, dataset_name+'.zip')\nuploaded = util_cloud.aws_upload(raw_data_dir, aws_bucket, s3_prefix+os.path.basename(raw_data_dir))\n\nlogger.info('Uploading processed data to S3.')\n# copy the processed data into a zipped file to upload to S3\nprocessed_data_dir = os.path.join(data_dir, dataset_name+'_edit.zip')\nwith ZipFile(processed_data_dir,'w')
as zip:\n    zip.write(processed_data_file, os.path.basename(processed_data_file))\n# upload processed data file to S3\nuploaded = util_cloud.aws_upload(processed_data_dir, aws_bucket, s3_prefix+os.path.basename(processed_data_dir))","sub_path":"ocn_014_index_of_coastal_protection_by_coral_reefs/ocn_014_index_of_coastal_protection_by_coral_reefs.py","file_name":"ocn_014_index_of_coastal_protection_by_coral_reefs.py","file_ext":"py","file_size_in_byte":5163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"532263849","text":"\"\"\"\nUsage: python infer.py --bs= --intra= --inter= \n\"\"\"\n\nimport argparse\nimport time\nimport pickle\nimport tensorflow as tf\nfrom input import DataInput, DataInputTest\nfrom model import Model\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--bs\",type=int)\nparser.add_argument(\"--intra\",type=int,default=0)\nparser.add_argument(\"--inter\",type=int,default=0)\n\nargs = parser.parse_args()\n\n\n\nwith open('dataset.pkl', 'rb') as f:\n    train_set = pickle.load(f)\n    test_set = pickle.load(f)\n    cate_list = pickle.load(f)\n    user_count, item_count, cate_count = pickle.load(f)\n\nmodel = Model(user_count, item_count, cate_count, cate_list)\n\ntest_batch_size = args.bs\n\ncpu_config = tf.ConfigProto()\ncpu_config.intra_op_parallelism_threads = args.intra\ncpu_config.inter_op_parallelism_threads = args.inter\n\n\nt1=time.time()\n\nwith tf.Session(config=cpu_config) as sess:\n    model.restore(sess,'save_path/ckpt')\n    for k, uij in DataInputTest(test_set, test_batch_size):\n        model.test(sess,uij)\n\nt2=time.time()\n\nf = open('inference_time.txt','a')\nf.write(str(t2-t1)+\"\\n\")\nf.close()\n","sub_path":"din/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"453302283","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'a test module'\n__author__ = 'Yinghao Jiang'\n\nimport functools\n\n'''\ndef log(func):\n    @functools.wraps(func) # functools.wraps restores the decorated function's name from wrapper back to now\n    def wrapper(*args, **kw):\n        print('call %s():' % func.__name__)\n        return func(*args, **kw)\n    return wrapper\n'''\n\ndef log(f):\n    text = ''\n    if isinstance(f, str):\n        text = f\n    def decorator(func):\n        @functools.wraps(func) # functools.wraps restores the decorated function's name from wrapper back to now\n        def wrapper(*args, **kw):\n            print('%s %s() : start' % (text, func.__name__))\n            result = func(*args, **kw)\n            print('%s %s() : ended' % (text, func.__name__))\n            return result\n        return wrapper\n    # used as @log('text'): f is the text, so return the decorator;\n    # used as @log: f is the function itself, so decorate it directly\n    return decorator if isinstance(f, str) else decorator(f)\n\n@log\ndef now():\n    print('2017-06-29')\n\nif __name__ == '__main__':\n    now()\n","sub_path":"demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"324733874","text":"\n\nfrom numba import njit, prange\n\n#import matplotlib\n#matplotlib.use('Agg')\n#import matplotlib.pyplot as plt\n#plt.style.use('ggplot')\n\n\n#from six.moves import xrange\nimport os, sys\n#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \n\nfrom util import load_single_image, normalize\nimport sys\nfrom PIL import Image\nfrom io import BytesIO\nimport os\n#import pkg_resources\n#pkg_resources.require(\"numpy==1.15.4\")\nimport numpy as np\nfrom util import load_image, array2PIL, in_memory_jpeg_compression\n#import argparse\n#from scipy.stats import percentileofscore\n\n#import pandas as pd\nfrom model import CNN\nfrom params import HyperParams\nimport skimage.io\n#import
tensorflow as tf\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\n\nmap = 'msroi_map.jpg'\nfind_best = 1\nthreshold_pct = 10\njpeg_compression = 50\nuse_convert = 0\n\n@njit(parallel=True)\ndef processcal(shape1,shape2,shape3,sal_arr,q_a,low,high,img_qualities,k):\n for i in prange(shape1):\n for j in prange(shape2):\n for l in prange(shape3):\n ss = sal_arr[i,j]\n\n for index, q_i in enumerate(q_a):\n if ss < q_i:\n qq = index + 1\n break\n\n \n\n if qq < low : qq = low\n if qq > high: qq = high\n k[i,j,l] = img_qualities[qq][i,j,l]\n\n return k\n \n\n\n \n\ndef make_quality_compression(original,sal,imgg,original1):\n \n #if the size of the map is not the same original image, then blow it'''\n if original.size != sal.size:\n sal = sal.resize(original.size)\n\n sal_arr = np.asarray(sal)\n img_qualities = []\n quality_steps = [i*10 for i in range(1,11)]\n\n # this temp directory will be deleted, do not use this to store your files\n os.makedirs('temp_xxx_yyy')\n for q in quality_steps:\n name = 'temp_xxx_yyy/temp_' + str(q) + '.jpg'\n if use_convert:\n os.system('convert -colorspace sRGB -filter Lanczos -interlace Plane -type truecolor -quality ' + str(q) + ' ' + image + ' ' + name)\n else:\n original.save(name, quality=q)\n img_qualities.append(np.asarray(Image.open(name)))\n os.remove(name)\n os.rmdir('temp_xxx_yyy')\n\n k = img_qualities[-1][:] # make sure it is a copy and not reference\n shape = k.shape\n\n #print(\"SHAPE TUPLE : \",shape)\n k.flags.writeable = True\n mx, mn = np.max(sal_arr), np.mean(sal_arr)\n\n sal_flatten = sal_arr.flatten()\n\n q_a = [np.percentile(sal_arr, j) for j in quality_steps]\n low, med, high = 1, 5, 9\n\n\n\n k = processcal(shape[0],shape[1],shape[2],sal_arr,q_a,low,high,img_qualities,k)\n\n original_size = in_memory_jpeg_compression(original,50)\n\n\n #print(\"Original_size\",original_size)\n\n out_img = array2PIL(k)\n\n qua = 0\n if find_best:\n #out_name = output_directory + '/' + '_compressed_' + imgg.split('/')[-1] + '_' + '.jpg'\n for qual in range(90,20,-1):\n out_img = out_img.convert(\"RGB\")\n #out_img.save(out_name, quality=qual)\n current_size = in_memory_jpeg_compression(out_img,qual)\n if current_size<= original_size*(1 + threshold_pct/100.0):\n qua = qual\n break\n else:\n pass\n\n #out_img.save(out_name, quality=qua)\n\n \n \n\n out_img.save(\"compressed_image.jpg\", quality=qua)\n \n \n \n \n\n\n\n \n\n\n\n\ndef compression_engine(img):\n\n image = load_single_image(img)\n\n print(\"INPUT IMAGE ARRAY \",image.shape)\n\n hyper = HyperParams(verbose=False)\n images_tf = tf.placeholder(tf.float32, [None, hyper.image_h, hyper.image_w, hyper.image_c], name=\"images\")\n class_tf = tf.placeholder(tf.int64, [None], name='class')\n\n cnn = CNN()\n if hyper.fine_tuning:\n cnn.load_vgg_weights()\n\n conv_last, gap, class_prob = cnn.build(images_tf)\n classmap = cnn.get_classmap(class_tf, conv_last)\n\n with tf.Session() as sess:\n tf.train.Saver().restore( sess, hyper.model_path )\n conv_last_val, class_prob_val = sess.run([conv_last, class_prob], feed_dict={images_tf: image})\n\n # use argsort instead of argmax to get all the classes\n class_predictions_all = class_prob_val.argsort(axis=1)\n\n roi_map = None\n for i in range(-1 * hyper.top_k,0):\n\n current_class = class_predictions_all[:,i]\n classmap_vals = sess.run(classmap, feed_dict={class_tf: current_class, conv_last: conv_last_val})\n normalized_classmap = normalize(classmap_vals[0])\n\n if roi_map is None:\n roi_map = 1.2 * normalized_classmap\n else:\n # simple exponential 
ranking\n roi_map = (roi_map + normalized_classmap)/2\n roi_map = normalize(roi_map)\n\n # save the plot and the map\n skimage.io.imsave( 'msroi_map.jpg', roi_map )\n \n\n\n\n\n original = Image.open(img)\n\n #print(\"ORIGINAL : \",original)\n sal = Image.open('msroi_map.jpg')\n\n make_quality_compression(original,sal,img,original)\n\n\n","sub_path":"Image_compression_deployment/compression.py","file_name":"compression.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"541837951","text":"a = [1,3 ,5]\nb = [1, 6, 7]\ne = [[1,3 ,5], [1, 6, 7], [2, 3, 5]]\nprint(max(e))\n\nclass Solution(object):\n def maxNumber(self, nums1, nums2, k):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n def pick_max(nums, k):\n stack = []\n drop = len(nums) - k\n for num in nums:\n while drop and stack and stack[-1] < num:\n stack.pop()\n drop -= 1\n stack.append(num)\n return stack[:k]\n\n def merge(A, B):\n ans = []\n while A or B:\n bigger = A if A > B else B\n ans.append(bigger[0])\n bigger.pop(0)\n return ans\n\n return max(merge(pick_max(nums1, i), pick_max(nums2, k-i)) for i in range(k+1) if i <= len(nums1) and (k-i) <= len(nums2))","sub_path":"刷题leetcode/321. 拼接最大数.py","file_name":"321. 拼接最大数.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"638254544","text":"from subprocess import CompletedProcess, CalledProcessError\nfrom unittest import TestCase\nfrom unittest.mock import MagicMock, patch\n\nfrom more_itertools.more import side_effect\n\nfrom pygitstory.commands import *\nfrom pygitstory.exceptions import *\n\n\nclass TestGitCommands(TestCase):\n\n @patch('pygitstory.commands.subprocess.run')\n @patch('pygitstory.commands.path_exists', return_value=False)\n def test_clone(self, exists_mock, run_mock):\n result = clone('url', 'dir')\n run_mock.assert_called()\n command_args = run_mock.call_args[0][0]\n self.assertListEqual(command_args, ['git', 'clone', 'url', 'dir'])\n self.assertTrue(result)\n\n @patch('pygitstory.commands.subprocess.run')\n @patch('pygitstory.commands.path_exists', return_value=True)\n def test_clone_does_nothing_when_cloned(self, exists_mock, run_mock):\n result = clone('url', 'dir')\n self.assertFalse(run_mock.called)\n self.assertFalse(result)\n\n @patch('pygitstory.commands.subprocess.run', side_effect=CalledProcessError(\n cmd=[],\n returncode=128,\n stderr=\n '''remote: Not Found\n fatal: repository 'https://github.com/a/' not found'''\n ))\n @patch('pygitstory.commands.path_exists', return_value=False)\n def test_clone_non_existing(self, exists_mock, run_mock):\n with self.assertRaises(RepoNotFound):\n clone('https://github.com/a/', 'dir')\n\n @patch('pygitstory.commands.subprocess.run', side_effect=CalledProcessError(\n cmd=[],\n returncode=128,\n stderr=\n '''fatal: unable to access 'https://invalidhost/': Could not resolve host: invalidhost'''\n ))\n @patch('pygitstory.commands.path_exists', return_value=False)\n def test_clone_invalid_git_host(self, exists_mock, run_mock):\n with self.assertRaises(InvalidGitHost):\n clone('https://invalidhost', 'dir')\n\n\n @patch('pygitstory.commands.subprocess.run', side_effect=CalledProcessError(\n cmd=[],\n returncode=128,\n stderr=\n '''fatal: could not create work tree dir '/a': Permission denied'''\n ))\n @patch('pygitstory.commands.path_exists', return_value=False)\n def 
test_clone_denied_repos_dir(self, exists_mock, run_mock):\n with self.assertRaises(InvalidReposDirectory):\n clone('host', 'denied_dir')\n\n @patch('pygitstory.commands.subprocess.run', side_effect=CalledProcessError(\n cmd=[],\n returncode=128,\n stderr=\n '''fatal: destination path '/' already exists and is not an empty directory.'''\n ))\n @patch('pygitstory.commands.path_exists', return_value=False)\n def test_clone_non_empty_repos_dir(self, exists_mock, run_mock):\n with self.assertRaises(InvalidReposDirectory):\n clone('host', 'non_empty_dir')\n\n @patch('pygitstory.commands.subprocess.run', return_value=CompletedProcess(\n args=[], \n returncode=0,\n stdout='output')\n )\n def test_log(self, run_mock):\n output = log('path')\n run_mock.assert_called()\n command_args = run_mock.call_args[0][0]\n self.assertListEqual(command_args, [\n 'git', '--git-dir=path/.git', 'log', '--format=format:{}'.format(LOG_FORMAT), '--reverse'])\n self.assertEqual(output, 'output')\n print(LOG_FORMAT)\n","sub_path":"pygitstory/tests/test_commands.py","file_name":"test_commands.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"193592869","text":"import argparse, json\n\nfrom boto.mturk.connection import MTurkConnection\nfrom boto.mturk.qualification import *\nfrom jinja2 import Environment, FileSystemLoader\n\n\n\"\"\"\nA bunch of free functions that we use in all scripts.\n\"\"\"\n\n\ndef get_jinja_env(config):\n \"\"\"\n Get a jinja2 Environment object that we can use to find templates.\n \"\"\"\n return Environment(loader=FileSystemLoader(config['template_directories']))\n\n\ndef json_file(filename):\n with open(filename, 'r') as f:\n return json.load(f)\n\n\ndef get_parent_parser():\n \"\"\"\n Get an argparse parser with arguments that are always needed\n \"\"\"\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('--prod', action='store_false', dest='sandbox',\n default=True,\n help=\"Whether to run on the production AMT site.\")\n parser.add_argument('--hit_ids_file')\n parser.add_argument('--config', default='config.json',\n type=json_file)\n return parser\n\n\ndef get_mturk_connection_from_args(args):\n \"\"\"\n Utility method to get an MTurkConnection from argparse args.\n \"\"\"\n aws_access_key = args.config.get('aws_access_key')\n aws_secret_key = args.config.get('aws_secret_key')\n return get_mturk_connection(sandbox=args.sandbox,\n aws_access_key=aws_access_key,\n aws_secret_key=aws_secret_key)\n\n\ndef get_mturk_connection(sandbox=True, aws_access_key=None,\n aws_secret_key=None):\n \"\"\"\n Get a boto mturk connection. 
This is a thin wrapper over the\n    MTurkConnection constructor; the only difference is a boolean\n    flag to indicate sandbox or not.\n    \"\"\"\n    kwargs = {}\n    if aws_access_key is not None:\n        kwargs['aws_access_key_id'] = aws_access_key\n    if aws_secret_key is not None:\n        kwargs['aws_secret_access_key'] = aws_secret_key\n\n    if sandbox:\n        host = 'mechanicalturk.sandbox.amazonaws.com'\n    else:\n        host = 'mechanicalturk.amazonaws.com'\n    return MTurkConnection(host=host, **kwargs)\n\n\ndef setup_qualifications(hit_properties):\n    \"\"\"\n    Replace some of the human-readable keys from the raw HIT properties\n    JSON data structure with boto-specific objects.\n    \"\"\"\n    qual = Qualifications()\n    if 'country' in hit_properties:\n        qual.add(LocaleRequirement('EqualTo',\n                 hit_properties['country']))\n        del hit_properties['country']\n\n    if 'hits_approved' in hit_properties:\n        qual.add(NumberHitsApprovedRequirement('GreaterThan',\n                 hit_properties['hits_approved']))\n        del hit_properties['hits_approved']\n\n    if 'percent_approved' in hit_properties:\n        qual.add(PercentAssignmentsApprovedRequirement('GreaterThan',\n                 hit_properties['percent_approved']))\n        del hit_properties['percent_approved']\n\n    hit_properties['qualifications'] = qual\n","sub_path":"simpleamt.py","file_name":"simpleamt.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"151783153","text":"import time\nimport os\nimport numpy as np\nfrom collections import defaultdict\nfrom bisect import bisect_left\nimport tensorflow as tf\nfrom tensorflow.contrib import learn\nimport pandas as pd\nfrom sklearn.metrics import f1_score\n\ndef get_word_vocab(urls, max_length_words, min_word_freq=0):\n    '''\n    Filter out low-frequency words and build the vocabulary\n    :param urls: shape = [ 'url1', 'url2', 'url3' ....]\n    :param max_length_words: the maximum number of words kept per URL\n    :param min_word_freq: the minimum frequency for a word to be kept\n    :return:\n    '''\n\n    vocab_processor = learn.preprocessing.VocabularyProcessor(max_length_words, min_frequency=min_word_freq)\n    start = time.time()\n    x = np.array(list(vocab_processor.fit_transform(urls)))\n    print(\"Finished build vocabulary and mapping to x in {}\".format(time.time() - start))\n    vocab_dict = vocab_processor.vocabulary_._mapping\n    reverse_dict = dict(zip(vocab_dict.values(), vocab_dict.keys()))\n    print(\"Size of word vocabulary: {}\".format(len(reverse_dict)))\n    return x, reverse_dict\n\n\n\ndef add_special_char(x, reverse_dict, delimit_mode, urls=None):\n    '''\n    Restore the original URL word lists from the word ids\n    See the paper:\n    :param x: shape is [[word_id1, word_id2, ... ], [ ...
], ...]\n    :param reverse_dict:\n    :param delimit_mode:\n        mode 0: words in the URL are split on special characters, and the special characters are dropped after splitting\n        mode 1: words in the URL are split on special characters, and the special characters are kept after splitting\n    :param urls: the raw URL strings (used when delimit_mode == 1)\n    :return:\n    '''\n    processed_x = []\n    # mode --------------------------- 0\n    if delimit_mode == 0:\n        for url in x:\n            words = []\n            for word_id in url:\n                if word_id != 0:\n                    words.append(reverse_dict[word_id])\n                else:\n                    break\n            processed_x.append(words)\n    # mode ---------------------------- 1\n    # quoted from the original paper:\n    # we hypothesize that special characters offer significant information gain for\n    # Malicious URL Detection because special characters are more frequent and relevant\n    # in the context of URLs than normal natural languages. As URL does not follow normal\n    # semantic syntax, special characters can play an important feature and should be\n    # considered with words.\n    elif delimit_mode == 1:\n        for i in range(x.shape[0]):\n            word_url = x[i]\n            raw_url = urls[i]\n            words = []\n            for w in range(len(word_url)):\n                word_id = word_url[w]\n                if word_id == 0:\n                    words.extend(list(raw_url))\n                    break\n                else:\n                    word = reverse_dict[word_id]\n                    idx = raw_url.index(word)\n                    special_chars = list(raw_url[0:idx])\n                    words.extend(special_chars)\n                    words.append(word)\n                    raw_url = raw_url[idx+len(word):]\n                    if w == len(word_url) - 1:\n                        words.extend(list(raw_url))\n            processed_x.append(words)\n    return processed_x\n\ndef get_word_id_x(word_x, max_len_subwords, high_freq_words=None):\n\n    all_chars = set()\n    chared_x = []\n    all_words = set()\n    worded_x = []\n    counter = 0\n    for url in word_x: # iterate over all URLs\n        if counter % 100000 == 0:\n            print(\"Processing #url {}\".format(counter))\n        counter += 1\n        url_in_chars = []  # [[char_11, char_12], [char_21, char_22]]\n        url_in_words = []  # [word1, word2]\n        words = url\n        for word in words: # iterate over all the words in this URL\n            chars_list = list(word) # list of all the characters in this word\n            # check: the word is longer than the max length OR (a high-frequency word list is given AND the word is longer than 1 AND the word is not in the high-frequency list)\n            if (len(chars_list) > max_len_subwords) or \\\n                    (high_freq_words is not None and len(word) > 1 and word not in high_freq_words):\n                all_chars.update(chars_list[:max_len_subwords])  # update the char set (after truncation)\n                url_in_chars.append(chars_list[:max_len_subwords]) # append to url_in_chars\n                all_words.add(\"<UNK>\")   # add <UNK> to all_words\n                url_in_words.append(\"<UNK>\")  # append <UNK> to url_in_words\n            else:\n                all_chars.update(chars_list)  # update the char set\n                url_in_chars.append(chars_list)\n                all_words.add(word)\n                url_in_words.append(word)\n        chared_x.append(url_in_chars)\n        worded_x.append(url_in_words)\n\n    # chared_x\n    # [[[char_111, char_112, char_113, ....],[char_121, char_122, char_123,...],...],[[]]]\n\n\n    all_chars = list(all_chars)\n    chars_dict = dict()\n    for i in range(len(all_chars)):\n        chars_dict[all_chars[i]] = i + 1            # char id=0 is for padding ngram\n    print(\"Size of ngram vocabulary: {}\".format(len(chars_dict)))\n    all_words = list(all_words)\n    words_dict = dict()\n    for i in range(len(all_words)):\n        words_dict[all_words[i]] = i + 1            # word id=0 for padding word\n    print(\"Size of word vocabulary: {}\".format(len(words_dict)))\n    print(\"Index of <UNK> word: {}\".format(words_dict[\"<UNK>\"]))\n\n    chared_word_id_x = []\n    for chared_url in chared_x:\n        url_in_chars = []\n        for chared_word in chared_url:\n            char_word_ids = [chars_dict[x] for x in chared_word]   # map each char to its id in chars_dict\n            url_in_chars.append(char_word_ids)\n        chared_word_id_x.append(url_in_chars)\n    worded_id_x = []\n    for worded_url in worded_x:\n        word_ids = [words_dict[x] for x in worded_url]\n        worded_id_x.append(word_ids)\n\n    return np.asarray(chared_word_id_x), chars_dict, np.asarray(worded_id_x), words_dict\n\n\ndef chared_id_x_from_dict(word_x, max_len_subwords,
char_dict, word_dict=None):\n    chared_worded_id_x = []\n    worded_id_x = []\n    counter = 0\n    if word_dict:\n        word_vocab = sorted(list(word_dict.keys()))\n    for url in word_x:\n        if counter % 100000 == 0:\n            print(\"Processing url #{}\".format(counter))\n        counter += 1\n        url_in_chared_words = []\n        url_in_words = []\n        words = url\n        for word in words:\n            char_list = list(word)\n            if len(char_list) > max_len_subwords:\n                word = \"<UNK>\"\n            char_list_id = []\n            for ngram in char_list:\n                if ngram in char_dict:\n                    char_list_id.append(char_dict[ngram])\n                else:\n                    char_list_id.append(0)\n            url_in_chared_words.append(char_list_id)\n            if word in word_vocab:\n                word_id = word_dict[word]\n            else:\n                word_id = word_dict[\"<UNK>\"]\n            url_in_words.append(word_id)\n        chared_worded_id_x.append(url_in_chared_words)\n        worded_id_x.append(url_in_words)\n\n    return chared_worded_id_x, worded_id_x\n\n\n\ndef is_in(a,x):\n    i = bisect_left(a,x)\n    if i != len(a) and a[i] == x:\n        return True\n    else:\n        return False\n\n\ndef read_data(file_dir):\n    df = pd.read_csv(file_dir)\n    return df['url'].values, df['label'].values\n\ndef char_id_x(urls, char_dict, max_len_chars):\n    chared_id_x = []\n    for url in urls:\n        url = list(url)\n        url_in_char_id = []\n        l = min(len(url), max_len_chars)\n        for i in range(l):\n            c = url[i]\n            try:\n                c_id = char_dict[c]\n            except KeyError:\n                c_id = 0\n            url_in_char_id.append(c_id)\n        chared_id_x.append(url_in_char_id)\n    return np.asarray(chared_id_x)\n\n\n\ndef pad_seq_in_word(urls, max_d1=0, embedding_size=128):\n    if max_d1 == 0:\n        url_lens = [len(url) for url in urls]\n        max_d1 = max(url_lens)\n    pad_urls = np.zeros((len(urls), max_d1))\n    #pad_idx = np.zeros((len(urls), max_d1, embedding_size))\n    #pad_vec = [1 for i in range(embedding_size)]\n    for d0 in range(len(urls)):\n        url = urls[d0]\n        for d1 in range(len(url)):\n            if d1 < max_d1:\n                pad_urls[d0,d1] = url[d1]\n                #pad_idx[d0,d1] = pad_vec\n    return pad_urls\n\n\ndef pad_seq(urls, max_d1=0, max_d2=0, embedding_size=128):\n    if max_d1 == 0 and max_d2 == 0:\n        for url in urls:\n            if len(url) > max_d1:\n                max_d1 = len(url)\n            for word in url:\n                if len(word) > max_d2:\n                    max_d2 = len(word)\n    pad_idx = np.zeros((len(urls), max_d1, max_d2, embedding_size))\n    pad_urls = np.zeros((len(urls), max_d1, max_d2))\n    pad_vec = [1 for i in range(embedding_size)]\n    for d0 in range(len(urls)):\n        url = urls[d0]\n        for d1 in range(len(url)):\n            if d1 < max_d1:\n                word = url[d1]\n                for d2 in range(len(word)):\n                    if d2 < max_d2:\n                        pad_urls[d0,d1,d2] = word[d2]\n                        pad_idx[d0,d1,d2] = pad_vec\n    return pad_urls, pad_idx","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"383657989","text":"__author__ = 'nsifniotis'\n\n\ncode_length = 0\nstring_length = 0\nex_count = 0\nwith open(\"input_8.txt\") as input_file:\n    for line in input_file.readlines():\n        line = line.strip()\n        code_length += len(line)\n\n        nu_line = line.replace(\"\\\\\", \"\\\\\\\\\")\n        nu_line = nu_line.replace(\"\\\"\", \"\\\\\\\"\")\n        string_length += 2 + len(nu_line)\n\n        print (line + \" -> \" + nu_line)\n\nprint(code_length - string_length)","sub_path":"day_eight.py","file_name":"day_eight.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"131319260","text":"import discord\nimport asyncio\nimport aiosqlite\nimport random\nimport difflib\n\nfrom discord.ext import commands\n\nclass Inventory:\n    def __init__(self,
bot):\n self.bot = bot\n\n async def on_message(self, message):\n if message.author.bot or not isinstance(message.channel, discord.DMChannel):\n return\n\n def check(reaction, user):\n return user == message.author\n\n def check2(msg):\n return msg.author == message.author and msg.content.strip() != \"\"\n\n salutations = [\"bonjour\", \"hello\", \"hi\", \"salut\", \"hey\", \"je t'aime\"]\n content = message.content.lower()\n if any(mot in content for mot in salutations):\n await message.channel.trigger_typing()\n await asyncio.sleep(2)\n\n users = await self.get_users()\n user_list = [user[0] for user in users]\n\n if message.author.id in user_list:\n inventory = await self.get_inventory()\n e = discord.Embed(description=inventory)\n e.set_author(name=\"Contenu de l'inventaire\")\n\n print(\"Showing inventory for {}\".format(message.author.display_name))\n\n await message.channel.send(\"Oh! Bonjour! Tu m'as déjà grandement aidé, je n'ai pas besoin d'aide supplémentaire. Merci beaucoup! Voici le contenu de l'inventaire de Princesseuh : \", embed=e)\n return\n\n intro_message = await message.channel.send(\"Oh! Bonjour! Je suis Inventaire. Habituellement, je m'occupe de gérer l'inventaire de Princesseuh mais.. J'ai eu un petit problème et j'aurais bien besoin de ton aide!\\n\\nC'est un peu gênant mais.. j'ai perdu tous ses objets! Enfin pas tous.. J'ai réussi à récupérer quelques trucs mais impossible de trouver le reste! Est-ce que tu voudrais bien m'aider?\")\n await intro_message.add_reaction(\"🇴\")\n await intro_message.add_reaction(\"🇳\")\n\n try:\n reaction, user = await self.bot.wait_for(\"reaction_add\", timeout=30.0, check=check)\n except asyncio.TimeoutError:\n await message.channel.send(\"Ah.. Je présume que ça veut dire non. Dommage.\")\n return\n else:\n if str(reaction.emoji) == \"🇴\":\n await message.channel.trigger_typing()\n await asyncio.sleep(2)\n inventory = await self.get_inventory()\n e = discord.Embed(description=inventory)\n e.set_author(name=\"Contenu de l'inventaire\")\n\n await message.channel.send(\"Ohh! Merci beaucoup! Je ne sais pas ce que j'aurais fait sans toi! Connaissant Princesseuh, elle aurait été furieuse si son inventaire avait été perdu (et elle m'aurait sûrement tapée!)\\nVoici ce qu'il y a dans l'inventaire pour le moment :\", embed=e)\n\n await message.channel.trigger_typing()\n await asyncio.sleep(4)\n await message.channel.send(\".. Oh?! Tu as déjà trouvé quelque chose? Chouette! Ajoutons ça à l'inventaire!\\n\\n(Tape le nom de l'objet à ajouter)\")\n\n too_big = True\n while (too_big):\n try:\n msg = await self.bot.wait_for(\"message\", timeout=90.0, check=check2)\n except asyncio.TimeoutError:\n await message.channel.send(\"Hmm.. Désolée mais.. Je n'ai pas toute la journée! Je dois retrouver ses objets. Reviens quand tu seras sûr de ce que tu as trouvé.\")\n return\n else:\n await message.channel.trigger_typing()\n await asyncio.sleep(2)\n\n if (len(msg.content.strip()) > 100):\n await message.channel.send(\"Hmm.. C'est un peu trop gros pour l'inventaire.. Est-ce que tu pourrais trouver quelque chose d'autre? (Tape le nom d'un objet de moins de 100 caractères)\")\n else:\n break\n\n possible_messages = [\n \"`{}`?! Je ne suis pas sûre qu'elle avait ça dans son inventaire mais bon.. Autant l'ajouter, elle sera contente! J'ajoute ça à l'inventaire!\",\n \"Wow! `{}`! La plupart des gens l'auraient gardé pour eux-mêmes. Merci beaucoup! Elle sera contente! Ajoutons cet objet à l'inventaire..\",\n \"Hmm.. 
Je ne suis pas sûre comment je vais faire pour faire rentrer ça dans l'inventaire mais si tu pense qu'elle avait ça.. Je vais l'ajouter à l'inventaire!\"\n ]\n\n rnumber = random.randint(0, len(possible_messages)-1)\n await message.channel.send(possible_messages[rnumber].format(msg.content.strip()))\n too_big = False\n\n await message.channel.trigger_typing()\n\n async with aiosqlite.connect(\"database.db\") as db:\n await db.execute(\"INSERT INTO items VALUES(NULL, ?, ?)\", (msg.content.strip(), msg.author.id))\n await db.execute(\"INSERT INTO users VALUES(NULL, ?)\", [msg.author.id])\n await db.commit()\n\n print(\"{} added to inventory by {}\".format(msg.content.strip(), message.author.display_name))\n\n await asyncio.sleep(3)\n offer = await message.channel.send(\"Et voilà! C'est fait!\\n\\nJ'aimerais te remercier avec quelque chose d'un peu plus conséquent qu'un simple merci.. Eeeet siiii.. Je te laissais prendre un des objets de Princesseuh en échange de celui que tu viens de me donner? Ça te dirait? Je suis sûre qu'elle ne sera pas trop fâchée!\")\n await offer.add_reaction(\"🇴\")\n await offer.add_reaction(\"🇳\")\n\n try:\n reaction, user = await self.bot.wait_for(\"reaction_add\", timeout=60.0, check=check)\n except asyncio.TimeoutError:\n await message.channel.send(\"Ha! Je vais prendre ça pour un non :stuck_out_tongue: Ton honnêteté est honorable :) Merci beaucoup pour ton aide! Reparle moi si tu veux voir le contenu de l'inventaire de Princesseuh!\")\n return\n else:\n if str(reaction.emoji) == \"🇴\":\n await message.channel.trigger_typing()\n await asyncio.sleep(2)\n\n inventory = await self.get_inventory()\n e = discord.Embed(description=inventory)\n e.set_author(name=\"Contenu de l'inventaire\")\n\n await message.channel.send(\"Ooh! Qu'est-ce que tu souhaiterais prendre? Pour rappel, voici ce qu'il y a dans son inventaire actuellement (Tape le nom de l'objet que vous souhaitez prendre) : \", embed=e)\n\n no_object = True\n msg3 = \"\"\n while (no_object):\n try:\n msg2 = await self.bot.wait_for(\"message\", timeout=120.0, check=check2)\n except asyncio.TimeoutError:\n await message.channel.send(\"Désolée! Je n'ai pas toute la journée! J'espère qu'un merci suffit! Reparle moi si tu veux voir le contenu de l'inventaire de Princesseuh!\")\n return\n else:\n item_to_get = msg2.content.strip()\n inventory = await self.get_inventory(False)\n item_list = [item[0] for item in inventory]\n\n if item_to_get in item_list:\n no_object = False\n msg3 = item_to_get\n break\n else:\n match = difflib.get_close_matches(item_to_get, item_list)\n matches = \" Peut-être voulais-tu dire {}?\".format(\", \".join(\"`{}`\".format(result) for result in match))\n\n await message.channel.send(\"Hmm.. Désolée mais je ne trouve pas de `{}` dans l'inventaire! Réessayons!{}\".format(item_to_get, matches if match else \"\"))\n\n await message.channel.trigger_typing()\n\n async with aiosqlite.connect(\"database.db\") as db:\n await db.execute(\"DELETE FROM items where id in (SELECT id FROM items WHERE item == (?) LIMIT 1)\", [msg3])\n await db.commit()\n\n print(\"{} removed from inventory by {}\".format(msg3, message.author.display_name))\n\n await asyncio.sleep(2)\n await message.channel.send(\"Ha! Bon choix, je suis sûre qu'elle n'a pas besoin de ce `{}`! C'est à toi maintenant :)\\nMerci beaucoup de ton aide! Si tu veux voir ce qu'il y a dans l'inventaire, suffit de revenir me dire bonjour! 
:)\".format(msg3))\n return\n elif str(reaction.emoji) == \"🇳\":\n await message.channel.trigger_typing()\n await asyncio.sleep(2)\n await message.channel.send(\"Ha! Je ne sais pas si Princesseuh a vraiment besoin de tout ce qu'elle a dans son sac mais je suis sûre qu'elle apprécie ton honnêteté! :) Merci beaucoup de ton aide! Si tu veux voir ce qu'il y a dans l'inventaire, suffit de revenir me dire bonjour!\")\n return\n\n elif str(reaction.emoji) == \"🇳\":\n await message.channel.trigger_typing()\n await asyncio.sleep(1)\n await message.channel.send(\"Ah.. Dommage.. Si tu change d'idée, revient me parler, j'ai toujours besoin d'une aide comme la tienne!\")\n return\n\n @commands.command(aliases=[\"d\"])\n @commands.is_owner()\n async def debug(self, ctx):\n items = await self.get_inventory()\n users = await self.get_users()\n print(items)\n print(users)\n\n async def get_users(self):\n async with aiosqlite.connect(\"database.db\") as db:\n cursor = await db.execute(\"SELECT discord_id FROM users\")\n rows = await cursor.fetchall()\n await cursor.close()\n\n return rows\n\n async def get_inventory(self, as_string=True):\n async with aiosqlite.connect(\"database.db\") as db:\n cursor = await db.execute('SELECT item, COUNT(item) FROM items GROUP BY item ORDER BY count(item) DESC')\n rows = await cursor.fetchall()\n await cursor.close()\n\n if as_string:\n result = \"\\n\".join(\"x{} {}\".format(item[1], item[0]) for item in rows)\n return result\n else:\n return rows\n\n\ndef setup(bot):\n bot.add_cog(Inventory(bot))\n","sub_path":"inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":11293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"591473583","text":"import getpass\r\nimport imp\r\nimport json\r\nimport logging\r\nimport os\r\nimport pathlib\r\nimport shutil\r\nimport subprocess\r\nfrom subprocess import Popen, CalledProcessError, PIPE\r\nimport sys\r\nimport tempfile\r\nfrom typing import (\r\n Optional, Tuple, Dict, NamedTuple, Callable,\r\n Collection, List, Union, Any\r\n)\r\nfrom urllib import parse, request\r\nimport uuid\r\nimport zipfile\r\nimport pyarrow\r\nimport setuptools\r\n\r\nfrom pex.pex_builder import PEXBuilder\r\nfrom pex.resolver import resolve_multi, Unsatisfiable\r\ntry:\r\n from pex.resolver import Untranslatable\r\nexcept ImportError:\r\n # keep compatibility with pex 2.1.1\r\n from pex.resolver import Untranslateable as Untranslatable\r\n\r\nfrom pex.pex_info import PexInfo\r\nfrom pex.interpreter import PythonInterpreter\r\nfrom pex.inherit_path import InheritPath\r\n\r\nfrom cluster_pack import filesystem, conda\r\n\r\nCRITEO_PYPI_URL = \"http://build-nexus.prod.crto.in/repository/moab.pypi/simple\"\r\n\r\nCONDA_DEFAULT_ENV = 'CONDA_DEFAULT_ENV'\r\n\r\nEDITABLE_PACKAGES_INDEX = 'editable_packages_index'\r\n\r\n_logger = logging.getLogger(__name__)\r\n\r\nJsonDictType = Dict[str, Any]\r\n\r\n\r\ndef _get_tmp_dir() -> str:\r\n tmp_dir = f\"/tmp/{uuid.uuid1()}\"\r\n _logger.debug(f\"local tmp_dir {tmp_dir}\")\r\n os.makedirs(tmp_dir, exist_ok=True)\r\n return tmp_dir\r\n\r\n\r\ndef zip_path(py_dir: str, include_base_name: bool = True, tmp_dir: str = _get_tmp_dir()) -> str:\r\n \"\"\"\r\n Zip current directory\r\n\r\n :param py_dir: directory to zip\r\n :param include_base_name: include the basename of py_dir into the archive (\r\n for skein zip files it should be False,\r\n for pyspark zip files it should be True)\r\n :return: destination of the archive\r\n \"\"\"\r\n py_archive = 
os.path.join(\r\n tmp_dir,\r\n os.path.basename(py_dir) + '.zip'\r\n )\r\n\r\n with zipfile.ZipFile(py_archive, 'w', zipfile.ZIP_DEFLATED) as zipf:\r\n for root, dirs, files in os.walk(py_dir):\r\n for file in files:\r\n # do not include .pyc files, it makes the import\r\n # fail for no obvious reason\r\n if not file.endswith(\".pyc\"):\r\n zipf.write(\r\n os.path.join(root, file),\r\n os.path.join(\r\n os.path.basename(py_dir) if include_base_name else \"\",\r\n os.path.relpath(root, py_dir),\r\n file\r\n )\r\n if root != py_dir\r\n else os.path.join(\r\n os.path.basename(root) if include_base_name else \"\",\r\n file\r\n ))\r\n return py_archive\r\n\r\n\r\ndef format_requirements(requirements: Dict[str, str]) -> List[str]:\r\n if requirements is None:\r\n return list()\r\n else:\r\n return [name + \"==\" + version\r\n if version else name\r\n for name, version in requirements.items()]\r\n\r\n\r\n# from https://github.com/pantsbuild/pex/blob/451977efdf987dd299a1b4798ac2ee298cd6d61b/\r\n# pex/bin/pex.py#L644\r\ndef _walk_and_do(fn: Callable, src_dir: str) -> None:\r\n src_dir = os.path.normpath(src_dir)\r\n for root, dirs, files in os.walk(src_dir):\r\n for f in files:\r\n src_file_path = os.path.join(root, f)\r\n dst_path = os.path.relpath(src_file_path, src_dir)\r\n fn(src_file_path, dst_path)\r\n\r\n\r\ndef pack_spec_in_pex(spec_file: str,\r\n output: str,\r\n pex_inherit_path: str = \"prefer\") -> str:\r\n with open(spec_file, \"r\") as f:\r\n lines = [line for line in f.read().splitlines()\r\n if line and not line.startswith(\"#\")]\r\n _logger.debug(f\"used requirements: {lines}\")\r\n return pack_in_pex(lines, output, pex_inherit_path=pex_inherit_path)\r\n\r\n\r\ndef pack_in_pex(requirements: List[str],\r\n output: str,\r\n ignored_packages: Collection[str] = [],\r\n pex_inherit_path: str = \"prefer\",\r\n editable_requirements: Dict[str, str] = {}\r\n ) -> str:\r\n \"\"\"\r\n Pack current environment using a pex.\r\n\r\n :param requirements: list of requirements (ex {'tensorflow': '1.15.0'})\r\n :param output: location of the pex\r\n :param ignored_packages: packages to be exluded from pex\r\n :param pex_inherit_path: see https://github.com/pantsbuild/pex/blob/master/pex/bin/pex.py#L264,\r\n possible values ['false', 'fallback', 'prefer']\r\n :return: destination of the archive, name of the pex\r\n \"\"\"\r\n\r\n interpreter = PythonInterpreter.get()\r\n pex_info = PexInfo.default(interpreter)\r\n pex_info.inherit_path = InheritPath.for_value(pex_inherit_path)\r\n pex_builder = PEXBuilder(\r\n interpreter=interpreter,\r\n pex_info=pex_info)\r\n\r\n for current_package in editable_requirements.values():\r\n _logger.debug(\"Add current path as source\", current_package)\r\n _walk_and_do(pex_builder.add_source, current_package)\r\n\r\n try:\r\n resolveds = resolve_multi(\r\n requirements=requirements,\r\n indexes=[CRITEO_PYPI_URL] if _is_criteo() else None)\r\n\r\n for resolved in resolveds:\r\n if resolved.distribution.key in ignored_packages:\r\n _logger.debug(f\"Ignore requirement {resolved.distribution}\")\r\n continue\r\n else:\r\n _logger.debug(f\"Add requirement {resolved.distribution}\")\r\n pex_builder.add_distribution(resolved.distribution)\r\n if (resolved.direct_requirement):\r\n pex_builder.add_requirement(resolved.direct_requirement)\r\n except (Unsatisfiable, Untranslatable):\r\n _logger.exception('Cannot create pex')\r\n raise\r\n\r\n pex_builder.build(output)\r\n\r\n return output\r\n\r\n\r\ndef _get_packages(editable: bool, executable: str = sys.executable) -> 
List[JsonDictType]:\r\n editable_mode = \"-e\" if editable else \"--exclude-editable\"\r\n results = subprocess.check_output(\r\n [f\"{executable}\", \"-m\", \"pip\", \"list\", \"-l\",\r\n f\"{editable_mode}\", \"--format\", \"json\", \"-v\"]).decode()\r\n\r\n _logger.debug(f\"'pip list' with editable={editable} results:\" + results)\r\n\r\n parsed_results = json.loads(results)\r\n\r\n # https://pip.pypa.io/en/stable/reference/pip_freeze/?highlight=freeze#cmdoption--all\r\n # freeze hardcodes to ignore those packages: wheel, distribute, pip, setuptools\r\n # To be iso with freeze we also remove those packages\r\n return [element for element in parsed_results\r\n if element[\"name\"] not in\r\n [\"distribute\", \"wheel\", \"pip\", \"setuptools\"]]\r\n\r\n\r\nclass Packer(object):\r\n def env_name(self) -> str:\r\n raise NotImplementedError\r\n\r\n def extension(self) -> str:\r\n raise NotImplementedError\r\n\r\n def pack(self,\r\n output: str,\r\n reqs: List[str],\r\n additional_packages: Dict[str, str],\r\n ignored_packages: Collection[str],\r\n editable_requirements: Dict[str, str]) -> str:\r\n raise NotImplementedError\r\n\r\n def pack_from_spec(self,\r\n spec_file: str,\r\n output: str) -> str:\r\n raise NotImplementedError\r\n\r\n\r\ndef get_env_name(env_var_name: str) -> str:\r\n \"\"\"\r\n Return default virtual env\r\n \"\"\"\r\n virtual_env_path = os.environ.get(env_var_name)\r\n if not virtual_env_path:\r\n return 'default'\r\n else:\r\n return os.path.basename(virtual_env_path)\r\n\r\n\r\nclass CondaPacker(Packer):\r\n def env_name(self) -> str:\r\n return pathlib.Path(sys.executable).parents[1].name\r\n\r\n def extension(self) -> str:\r\n return 'tar.gz'\r\n\r\n def pack(self,\r\n output: str,\r\n reqs: List[str],\r\n additional_packages: Dict[str, str],\r\n ignored_packages: Collection[str],\r\n editable_requirements: Dict[str, str]) -> str:\r\n return conda.pack_venv_in_conda(\r\n self.env_name(),\r\n reqs,\r\n len(additional_packages) > 0 or len(ignored_packages) > 0,\r\n output)\r\n\r\n def pack_from_spec(self,\r\n spec_file: str,\r\n output: str) -> str:\r\n return conda.create_and_pack_conda_env(\r\n spec_file=spec_file,\r\n reqs=None,\r\n output=output)\r\n\r\n\r\nclass PexPacker(Packer):\r\n def env_name(self) -> str:\r\n return get_env_name('VIRTUAL_ENV')\r\n\r\n def extension(self) -> str:\r\n return 'pex'\r\n\r\n def pack(self,\r\n output: str,\r\n reqs: List[str],\r\n additional_packages: Dict[str, str],\r\n ignored_packages: Collection[str],\r\n editable_requirements: Dict[str, str]) -> str:\r\n return pack_in_pex(reqs,\r\n output,\r\n ignored_packages,\r\n editable_requirements=editable_requirements)\r\n\r\n def pack_from_spec(self,\r\n spec_file: str,\r\n output: str) -> str:\r\n return pack_spec_in_pex(spec_file=spec_file, output=output)\r\n\r\n\r\nCONDA_PACKER = CondaPacker()\r\nPEX_PACKER = PexPacker()\r\n\r\n\r\ndef _get_editable_requirements(executable: str = sys.executable) -> List[str]:\r\n top_level_pkgs = []\r\n for pkg in _get_packages(True, executable):\r\n for _pkg in setuptools.find_packages(pkg[\"location\"]):\r\n if \".\" in _pkg:\r\n continue\r\n imported = __import__(_pkg)\r\n top_level_pkgs.append(os.path.dirname(imported.__file__))\r\n return top_level_pkgs\r\n\r\n\r\ndef get_non_editable_requirements(executable: str = sys.executable) -> Dict[str, str]:\r\n return {package[\"name\"]: package[\"version\"]\r\n for package in _get_packages(False, executable)}\r\n\r\n\r\ndef detect_archive_names(\r\n packer: Packer,\r\n package_path: str = None\r\n) 
-> Tuple[str, str, str]:\r\n if _running_from_pex():\r\n pex_file = get_current_pex_filepath()\r\n env_name = os.path.basename(pex_file).split('.')[0]\r\n else:\r\n pex_file = \"\"\r\n env_name = packer.env_name()\r\n\r\n if not package_path:\r\n package_path = (f\"{get_default_fs()}/user/{getpass.getuser()}\"\r\n f\"/envs/{env_name}.{packer.extension()}\")\r\n else:\r\n if \"\".join(pathlib.Path(package_path).suffixes) != f\".{packer.extension()}\":\r\n raise ValueError(f\"{package_path} has the wrong extension\"\r\n f\", .{packer.extension()} is expected\")\r\n\r\n return package_path, env_name, pex_file\r\n\r\n\r\ndef detect_packer_from_spec(spec_file: str) -> Packer:\r\n if os.path.basename(spec_file) == \"requirements.txt\":\r\n return PEX_PACKER\r\n elif spec_file.endswith(\".yaml\") or spec_file.endswith(\".yml\"):\r\n return CONDA_PACKER\r\n else:\r\n raise ValueError(f\"Archive format {spec_file} unsupported. \"\r\n \"Must be requirements.txt or conda .yaml\")\r\n\r\n\r\ndef detect_packer_from_env() -> Packer:\r\n if _is_conda_env():\r\n return CONDA_PACKER\r\n else:\r\n return PEX_PACKER\r\n\r\n\r\ndef detect_packer_from_file(zip_file: str) -> Packer:\r\n if zip_file.endswith('.pex'):\r\n return PEX_PACKER\r\n elif zip_file.endswith(\".zip\") or zip_file.endswith(\".tar.gz\"):\r\n return CONDA_PACKER\r\n else:\r\n raise ValueError(f\"Archive format {zip_file} unsupported. \"\r\n \"Must be .pex or conda .zip/.tar.gz\")\r\n\r\n\r\ndef get_current_pex_filepath() -> str:\r\n \"\"\"\r\n If we run from a pex, returns the path\r\n \"\"\"\r\n import _pex\r\n return os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(_pex.__file__))))\r\n\r\n\r\ndef get_editable_requirements(\r\n executable: str = sys.executable,\r\n editable_packages_dir: str = os.getcwd()\r\n) -> Dict[str, str]:\r\n editable_requirements: Dict[str, str] = {}\r\n if _running_from_pex():\r\n try:\r\n package_names = open(\r\n f\"{editable_packages_dir}/{EDITABLE_PACKAGES_INDEX}\"\r\n ).read().splitlines()\r\n except FileNotFoundError:\r\n editable_requirements = {}\r\n else:\r\n for package_name in package_names:\r\n try:\r\n _, path, _ = imp.find_module(package_name)\r\n editable_requirements[os.path.basename(path)] = path\r\n except ImportError:\r\n _logger.error(f\"Could not import package {package_name}\"\r\n f\" repo exists={os.path.exists(package_name)}\")\r\n else:\r\n editable_requirements = {os.path.basename(requirement_dir): requirement_dir\r\n for requirement_dir in _get_editable_requirements(executable)}\r\n\r\n _logger.info(f\"found editable requirements {editable_requirements}\")\r\n return editable_requirements\r\n\r\n\r\ndef get_default_fs() -> str:\r\n return subprocess.check_output(\"hdfs getconf -confKey fs.defaultFS\".split()).strip().decode()\r\n\r\n\r\ndef _is_conda_env() -> bool:\r\n return os.environ.get(CONDA_DEFAULT_ENV) is not None\r\n\r\n\r\ndef _running_from_pex() -> bool:\r\n try:\r\n import _pex\r\n return True\r\n except ModuleNotFoundError:\r\n return False\r\n\r\n\r\ndef _is_criteo() -> bool:\r\n return \"CRITEO_ENV\" in os.environ\r\n","sub_path":"cluster_pack/packaging.py","file_name":"packaging.py","file_ext":"py","file_size_in_byte":13423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"72813059","text":"import tensorflow as tf\nimport tensorflow_addons as tfa\nfrom tensorflow.keras import layers\n\nimport settings\n\n\nclass PWCNet:\n def __init__(self, total_shape, conv2d):\n self.total_shape = total_shape\n 
self.conv2d_f = conv2d\n print(\"[FlowNet] convolution default options:\", vars(conv2d))\n\n def __call__(self):\n batch, snippet, height, width, channel = self.total_shape\n numsrc = snippet - 1\n input_shape = (snippet * height, width, channel)\n input_tensor = layers.Input(shape=input_shape, batch_size=batch, name=\"depthnet_input\")\n # target: [batch, height, width, channel]\n # source: [batch*num_src, height, width, channel]\n target, sources = self.split_target_and_sources(input_tensor)\n\n # encode left (target) and right (source) image\n c1l, c2l, c3l, c4l, c5l, c6l = self.pwc_encode(target, \"_l\")\n c1r, c2r, c3r, c4r, c5r, c6r = self.pwc_encode(sources, \"_r\")\n\n # repeate target numsrc times -> [batch*num_src, height//scale, width//scale, channel]\n c1l, c2l, c3l, c4l, c5l, c6l = self.repeat_features((c1l, c2l, c3l, c4l, c5l, c6l), numsrc)\n\n corr6 = self.correlation(c6l, c6r)\n flow6, up_flow6, up_feat6 = self.predict_flow(corr6, \"flow6\")\n\n flow5, up_flow5, up_feat5 = self.upconv_flow(5, c5l, c5r, 0.625, up_flow6, up_feat6)\n flow4, up_flow4, up_feat4 = self.upconv_flow(4, c4l, c4r, 1.25, up_flow5, up_feat5)\n flow3, up_flow3, up_feat3 = self.upconv_flow(3, c3l, c3r, 2.5, up_flow4, up_feat4)\n flow2, flow_feat2 = self.upconv_flow(2, c2l, c2r, 5.0, up_flow3, up_feat3, up=False)\n\n flow2 = self.context_network(flow_feat2, flow2)\n flows_ms = [flow2, flow3, flow4, flow5, flow6]\n\n # reshape back to normal bactch size\n # -> list of [batch, num_src, height//scale, width//scale, channel]\n flows_ms = self.reshape_batch_back(flows_ms)\n pwcnet = tf.keras.Model(inputs=input_tensor, outputs={\"flows_ms\": flows_ms}, name=\"PWCNet\")\n return pwcnet\n\n def split_target_and_sources(self, input_tensor):\n \"\"\"\n :param input_tensor [batch, snippet*height, width, 3]\n :return: target [batch, height, width, 3]\n source [batch*numsrc, height, width, 3]\n \"\"\"\n batch, snippet, height, width, channel = self.total_shape\n numsrc = snippet - 1\n target = input_tensor[:, numsrc*height:]\n sources = input_tensor[:, :numsrc*height]\n sources = tf.reshape(sources, (batch*numsrc, height, width, channel))\n return target, sources\n\n def pwc_encode(self, x, suffix):\n c1 = self.conv2d_f(x, 16, 3, 2, name=\"pwc_conv1a\" + suffix)\n c1 = self.conv2d_f(c1, 16, 3, 1, name=\"pwc_conv1b\" + suffix)\n c1 = self.conv2d_f(c1, 16, 3, 1, name=\"pwc_conv1c\" + suffix)\n c2 = self.conv2d_f(c1, 32, 3, 2, name=\"pwc_conv2a\" + suffix)\n c2 = self.conv2d_f(c2, 32, 3, 1, name=\"pwc_conv2b\" + suffix)\n c2 = self.conv2d_f(c2, 32, 3, 1, name=\"pwc_conv2c\" + suffix)\n c3 = self.conv2d_f(c2, 64, 3, 2, name=\"pwc_conv3a\" + suffix)\n c3 = self.conv2d_f(c3, 64, 3, 1, name=\"pwc_conv3b\" + suffix)\n c3 = self.conv2d_f(c3, 64, 3, 1, name=\"pwc_conv3c\" + suffix)\n c4 = self.conv2d_f(c3, 96, 3, 2, name=\"pwc_conv4a\" + suffix)\n c4 = self.conv2d_f(c4, 96, 3, 1, name=\"pwc_conv4b\" + suffix)\n c4 = self.conv2d_f(c4, 96, 3, 1, name=\"pwc_conv4c\" + suffix)\n c5 = self.conv2d_f(c4, 128, 3, 2, name=\"pwc_conv5a\" + suffix)\n c5 = self.conv2d_f(c5, 128, 3, 1, name=\"pwc_conv5b\" + suffix)\n c5 = self.conv2d_f(c5, 128, 3, 1, name=\"pwc_conv5c\" + suffix)\n c6 = self.conv2d_f(c5, 196, 3, 2, name=\"pwc_conv6a\" + suffix)\n c6 = self.conv2d_f(c6, 196, 3, 1, name=\"pwc_conv6b\" + suffix)\n c6 = self.conv2d_f(c6, 196, 3, 1, name=\"pwc_conv6c\" + suffix)\n return c1, c2, c3, c4, c5, c6\n\n def repeat_features(self, features, numsrc):\n rep_feats = []\n for feat in features:\n batch, height, width, channel = 
feat.get_shape()\n # feat -> [batch, 1, height, width, channel]\n feat = tf.expand_dims(feat, 1)\n # feat -> [batch, numsrc, height, width, channel]\n feat = tf.tile(feat, (1, numsrc, 1, 1, 1))\n # feat -> [batch*numsrc, height, width, channel]\n feat = tf.reshape(feat, (batch*numsrc, height, width, channel))\n rep_feats.append(feat)\n return tuple(rep_feats)\n\n def reshape_batch_back(self, flows_ms):\n batch, snippet = self.total_shape[:2]\n numsrc = snippet - 1\n rsp_flows_ms = []\n for flow in flows_ms:\n _, height, width, channel = flow.get_shape()\n rsp_flow = tf.reshape(flow, (batch, numsrc, height, width, channel))\n rsp_flows_ms.append(rsp_flow)\n return rsp_flows_ms\n\n def upconv_flow(self, p, cp_l, cp_r, flow_scale, up_flowq, up_featq, up=True):\n \"\"\"\n :param p: current layer level, q = p+1 (lower resolution level)\n :param cp_l: p-th encoded feature from left image [batch, height//2^p, width//2^p, channel_p]\n :param cp_r: p-th encoded feature from left image [batch, height//2^p, width//2^p, channel_p]\n :param flow_scale: flow scale factor for flow scale to be 1/20\n :param up_flowq: upsampled flow from q-th level [batch, height//2^p, width//2^p, 2]\n :param up_featq: upsampled flow from q-th level [batch, height//2^p, width//2^p, channel_q]\n :param up: whether to return upsample flow and feature\n :return:\n \"\"\"\n # TODO: [ERROR] the below function results in TypeError:\n # '''\n # cp_r_warp = tfa.image.dense_image_warp(cp_r, up_flowq * flow_scale, name=f\"pwc_flow{p}_warp\")\n # '''\n # TypeError: An op outside of the function building code is being passed a \"Graph\" tensor. ~~~\n # there might be a bug in tfa.image.dense_image_warp(),\n # so the function is enclosed in layers.Lambda()\n cp_r_warp = layers.Lambda(lambda inputs: tfa.image.dense_image_warp(\n inputs[0], inputs[1]*flow_scale),\n name=f\"pwc_flow{p}_warp\")([cp_r, up_flowq])\n corrp = self.correlation(cp_l, cp_r_warp, name=f\"pwc_flow{p}_corr\")\n return self.predict_flow([corrp, cp_l, up_flowq, up_featq], f\"flow{p}\", up)\n\n def predict_flow(self, inputs, tag, up=True):\n x = tf.concat(inputs, axis=-1)\n c = self.conv2d_f(x, 128, name=f\"pwc_{tag}_c1\")\n x = tf.concat([x, c], axis=-1)\n c = self.conv2d_f(x, 128, name=f\"pwc_{tag}_c2\")\n x = tf.concat([x, c], axis=-1)\n c = self.conv2d_f(x, 96, name=f\"pwc_{tag}_c3\")\n x = tf.concat([x, c], axis=-1)\n c = self.conv2d_f(x, 64, name=f\"pwc_{tag}_c4\")\n x = tf.concat([x, c], axis=-1)\n c = self.conv2d_f(x, 32)\n flow = self.conv2d_f(c, 2, activation=\"linear\", name=f\"pwc_{tag}_out\")\n\n if up:\n up_flow = layers.Conv2DTranspose(2, kernel_size=4, strides=2, padding=\"same\",\n name=f\"pwc_{tag}_ct1\")(flow)\n up_feat = layers.Conv2DTranspose(2, kernel_size=4, strides=2, padding=\"same\",\n name=f\"pwc_{tag}_ct2\")(c)\n return flow, up_flow, up_feat\n else:\n return flow, c\n\n def context_network(self, x, flow):\n c = self.conv2d_f(x, 128, 3, dilation_rate=1, name=\"pwc_context_1\")\n c = self.conv2d_f(c, 128, 3, dilation_rate=2, name=\"pwc_context_2\")\n c = self.conv2d_f(c, 128, 3, dilation_rate=4, name=\"pwc_context_3\")\n c = self.conv2d_f(c, 96, 3, dilation_rate=8, name=\"pwc_context_4\")\n c = self.conv2d_f(c, 64, 3, dilation_rate=16, name=\"pwc_context_5\")\n c = self.conv2d_f(c, 32, 3, dilation_rate=1, name=\"pwc_context_6\")\n refined_flow = self.conv2d_f(c, 2, activation=\"linear\", name=f\"pwc_context_7\") + flow\n return refined_flow\n\n def correlation(self, cl, cr, ks=1, md=4, name=\"\"):\n corr = 
tfa.layers.CorrelationCost(kernel_size=ks, max_displacement=md, stride_1=1, stride_2=1,\n pad=md + ks//2, data_format=\"channels_last\", name=name\n )([cl, cr])\n return corr\n\n\n# ===== TEST FUNCTIONS\n\nimport numpy as np\n\n\ndef test_correlation():\n print(\"\\n===== start test_correlation\")\n batch, height, width, channel = (8, 100, 200, 10)\n cl = tf.random.uniform((batch, height, width, channel), -2, 2)\n cr = tf.random.uniform((batch, height, width, channel), -2, 2)\n print(\"input shape:\", (batch, height, width, channel))\n ks, md = 1, 5\n\n # EXECUTE\n corr = tfa.layers.CorrelationCost(kernel_size=ks, max_displacement=md, stride_1=1, stride_2=1,\n pad=md + ks // 2, data_format=\"channels_last\")([cl, cr])\n\n # TEST\n corr_shape = (batch, height, width, (2*md + 1)**2)\n assert corr.get_shape() == corr_shape, f\"correlation shape: {corr.get_shape()} != {corr_shape}\"\n print(\"correlation shape:\", corr.get_shape())\n\n # manually compute correlation at (md+v, md+u) but NOT same with corr\n u, v = 1, 1\n cr_shift = tf.pad(cr[:, v:, u:, :], [[0, 0], [v, 0], [u, 0], [0, 0]])\n corr_man = cl * cr_shift\n corr_man = layers.AveragePooling2D(pool_size=(ks, ks), strides=1, padding=\"same\")(corr_man)\n corr_man = tf.reduce_mean(corr_man, axis=-1)\n print(\"corr_man shape:\", corr_man.get_shape())\n\n print(\"!!! test_correlation passed\")\n\n\ndef test_warp_simple():\n print(\"\\n===== start test_warp_simple\")\n batch, height, width, channel = (8, 100, 200, 10)\n im = tf.random.uniform((batch, height, width, channel), -2, 2)\n dy, dx = 1.5, 0.5\n flow = tf.stack([tf.ones((batch, height, width)) * dy, tf.ones((batch, height, width)) * dx], axis=-1)\n\n # EXECUTE\n warp_tfa = tfa.image.dense_image_warp(im, flow)\n\n # flow is applied in a negative way\n warp_man = (im[:, 9:19, 10:20, :] + im[:, 8:18, 10:20, :] + im[:, 9:19, 9:19, :]\n + im[:, 8:18, 9:19, :]) / 4.\n print(\"warp_tfa:\", warp_tfa[1, 10:15, 10:15, 1].numpy())\n print(\"warp_man:\", warp_man[1, 0:5, 0:5, 1].numpy())\n assert np.isclose(warp_tfa[1, 10:15, 10:15, 1].numpy(), warp_man[1, 0:5, 0:5, 1].numpy()).all()\n print(\"!!! test_warp_simple passed\")\n\n\ndef test_warp_multiple():\n print(\"\\n===== start test_warp_simple\")\n\n for k in range(1, 10):\n batch, height, width, channel = (8, 100*5, 200, 10)\n im = tf.random.uniform((batch, height, width, channel), -2, 2)\n dy, dx = 1.5, 0.5\n flow = tf.stack([tf.ones((batch, height, width)) * dy, tf.ones((batch, height, width)) * dx], axis=-1)\n\n # EXECUTE\n warp_tfa = tfa.image.dense_image_warp(im, flow)\n print(\"dense_image_warp without name\", warp_tfa.get_shape())\n\n # TODO: WARNING!! the below loop results in warnings like\n # \"WARNING:tensorflow:5 out of the last 13 calls to \n # triggered tf.function retracing. ~~~\"\n # It seems like a bug and it happens only in the eager execution mode\n for k in range(1, 10):\n batch, height, width, channel = (8, 100*5, 200, 10)\n im = tf.random.uniform((batch, height, width, channel), -2, 2)\n dy, dx = 1.5, 0.5\n flow = tf.stack([tf.ones((batch, height, width)) * dy, tf.ones((batch, height, width)) * dx], axis=-1)\n\n # EXECUTE\n warp_tfa = tfa.image.dense_image_warp(im, flow, name=f\"warp{k}\")\n print(\"dense_image_warp with name\", warp_tfa.get_shape())\n\n print(\"!!! 
test_warp_multiple passed\")\n\n\ndef test_conv2d_5dtensor():\n print(\"\\n===== start test_conv2d_5dtensor\")\n input_shape = (8, 4, 100, 200, 10)\n input_tensor = tf.random.uniform(input_shape, -2, 2)\n conv_layer = tf.keras.layers.Conv2D(100, 3, 1, \"same\")\n try:\n output_tensor = conv_layer(input_tensor)\n print(output_tensor.get_shape())\n print(\"!!! test_conv2d_5dtensor passed\")\n except ValueError as ve:\n print(\"ERROR!!\")\n print(\"[test_conv2d_5dtensor]\", ve)\n\n\ndef test_layer_input():\n print(\"\\n===== start test_layer_input\")\n batch, numsrc, height, width, channel = (8, 4, 100, 200, 10)\n input_tensor = layers.Input(shape=(height, width, channel), batch_size=batch*numsrc, name=\"input_tensor\")\n print(\"input tensor shape:\", input_tensor.get_shape())\n assert input_tensor.get_shape() == (batch*numsrc, height, width, channel)\n print(\"!!! test_layer_input passed\")\n\n\ndef test_reshape_tensor():\n print(\"\\n===== start test_reshape_tensor\")\n batch, numsrc, height, width, channel = (8, 4, 100, 200, 10)\n src_tensor = tf.random.uniform((batch, numsrc*height, width, channel), -2, 2)\n dst_tensor = tf.reshape(src_tensor, (batch*numsrc, height, width, channel))\n batidx = 1\n srcidx = 1\n print(\"src data:\\n\", src_tensor[batidx, height*srcidx + 5:height*srcidx + 10, 5:10, 3].numpy())\n print(\"dst data:\\n\", dst_tensor[numsrc*batidx + srcidx, 5:10, 5:10, 3].numpy())\n assert np.isclose(src_tensor[batidx, height*srcidx:height*(srcidx+1)].numpy(),\n dst_tensor[numsrc*batidx + srcidx, :].numpy()).all()\n print(\"!!! test_reshape_tensor passed\")\n\n\nimport model.build_model.model_utils as mu\n\n\ndef test_lambda_layer():\n print(\"\\n===== start test_lambda_layer\")\n batch, height, width, channel = (8, 100, 200, 10)\n x = tf.random.uniform((batch, height, width, channel), -2, 2)\n conv2d = mu.CustomConv2D(activation=layers.LeakyReLU(0.1))\n y = convnet(conv2d, x)\n print(\"normally built convnet, y shape:\", y.get_shape())\n\n try:\n y = layers.Lambda(lambda inputs: convnet(conv2d, inputs),\n name=\"convnet\")(x)\n print(\"lambda layer output, y shape:\", y.get_shape())\n print(\"!!! test_lambda_layer passed\")\n except ValueError as ve:\n print(\"[test_lambda_layer]\", ve)\n\n\ndef convnet(conv_op, x):\n c = conv_op(x, 3)\n c = conv_op(c, 5)\n c = conv_op(c, 1, strides=2)\n return c\n\n\ndef test_pwcnet():\n print(\"\\n===== start test_pwcnet\")\n total_shape = batch, snippet, height, width, channel = (8, 4, 128, 256, 10)\n input_tensor = tf.random.uniform((batch, snippet*height, width, channel), -2, 2)\n conv2d = mu.conv2d_func_factory(activation=layers.LeakyReLU(0.1))\n\n # EXECUTE\n pwc_net = PWCNet(total_shape, conv2d)()\n pwc_net.summary()\n\n flows = run_net(pwc_net, input_tensor)\n for flow in flows:\n print(\"PWCNet flow shape:\", flow.get_shape())\n assert flows[0].get_shape() == (batch, snippet - 1, height // 4, width // 4, 2)\n assert flows[1].get_shape() == (batch, snippet - 1, height // 8, width // 8, 2)\n print(\"!!! 
test_pwcnet passed\")\n\n\n# @tf.function\ndef run_net(net, input_tensor):\n return net(input_tensor)\n\n\nif __name__ == \"__main__\":\n # test_correlation()\n # test_warp_simple()\n # test_warp_multiple()\n # test_conv2d_5dtensor()\n # test_layer_input()\n # test_reshape_tensor()\n # test_lambda_layer()\n test_pwcnet()\n\n","sub_path":"model/build_model/flow_net.py","file_name":"flow_net.py","file_ext":"py","file_size_in_byte":15180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"644472265","text":"#!/usr/bin/python3\n# coding: iso-8859-1\n\n\"\"\"user\"\"\"\n\nimport os, time, sys, select\nfrom msvcrt import getch\n\n# previous router<>final address<>sender<>message\n# 0 1 2 3\n\n\"\"\"we will have to handle the case where the destination computer is not reachable (at all)\"\"\"\n\n\ndef url(num): return \"taches/joueur\"+ str(num)+\".txt\"\nclass Ordi :\n def __init__(self, num):\n self.num = num\n self.url = url(num)\n #os.remove(self.url) # remove the old file\n self.fichier = open(self.url, \"w\")\n self.fichier.close()\n \n def envoi(self, param): # the carrier will necessarily be a neighbor of self\n phrase = str(self.num) +\"<>\"+ param[1] +\"<>\"+ param[2] +\"<>\"+ param[3]\n facteur = self.route(int(param[1])) # find who to route through\n f=open(url(facteur),\"a\")\n f.write(phrase + \"\\n\")\n f.close()\n return True\n \n def route(self, dest): # ugly hack to start with: the target is a direct neighbor!\n fichier=open(\"table.txt\",'r')\n table=fichier.read()\n fichier.close()\n if str(table[self.num-1][dest-1])==\"1\":\n return(dest)\n \n def traitement(self):\n f=open(self.url, \"r\")\n tache = f.readline()\n contenu = f.read()\n f.close()\n f=open(self.url, \"w\")\n f.write(contenu)\n f.close()\n \n if tache != \"\":\n print(tache)\n param = tache.split(\"<>\") # /!\\ here it is param[0...] and not param['dest'...]\n # for i in range (2): param[i] = int(param[i])\n if(int(param[1]) == self.num):\n print(\"Computer \"+param[2]+\" sends you: \\\"\"+ param[3] +\"\\\".\")\n return \"\"\n # else:\n param[0] = self.num\n print(\"You are forwarding a message from \"+param[2]+\" to \"+param[1]+\".\\n\")\n self.envoi(param)\n\ndef ecrire():\n phrase = input(\"message? \")\n dest = int(input(\"recipient? \"))\n ordi.envoi([str(ordi.num), str(dest), str(ordi.num), phrase])\n main()\n \ndef main():\n global QUITTER, chargement\n chargement[0] = chargement[0]%(len(chargement)-1) + 1\n print(chargement[chargement[0]], end=\"\\r\")\n ordi.traitement() \n def copain():\n #print(\"copain\")\n global nombre\n try: # check whether a new peer has arrived\n fichier = open(url(nombre), \"r\")\n print(\"Computer \" + str(nombre) + \" has joined the network!\") \n nombre+=1\n main()\n except IOError:\n return(True) \n copain() \n try:\n #print(\"try\")\n if(QUITTER):\n os.remove(url(num))\n sys.exit(0)\n time.sleep(1)\n main()\n except KeyboardInterrupt:\n print(\"Esc to quit, Space to go back, any other key to write\")\n z=getch()\n if ord(z)==27: #Esc # There were lots of bugs so I had to do some shady things\n QUITTER = True\n os.remove(url(num))\n sys.exit(0)\n elif ord(z)==32: #Space\n main()\n else:\n ecrire()\n\nQUITTER = False\nchargement = [0,\" (O)c===8 \",\" (O) c===8\",\" (O)c===8 \",\" (Cc===8 \",\" A (C===8 \",\" Ah (C==8 \",\" Ah! (C===8 \",\" Ah! 
(Cc===8 \"]\ncmp=1\nos.system(\"cls\")\nwhile 1: # join the network + launch\n try:\n fichier = open(url(cmp), \"r\")\n except IOError: # the file does not exist\n nombre = cmp\n num=cmp\n fichier=open(url(num),'w') # so create it\n fichier.close()\n os.system(\"title User\" + str(num))\n print(\"you are Computer \" + str(num))\n ordi=Ordi(num)\n print(\"Press Ctrl+C to quit or to write\",'\\n')\n main() # the serious business begins, we leave this place.\n cmp+=1\n\n \n\"\"\"end user\"\"\"\n","sub_path":"user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"287906908","text":"one=[\"One\",\"Two\",\"Three\",\"Four\",\"Five\",\"Six\",\"Seven\",\n\"Eight\",\"Nine\"]\nten=[\"Twenty\",\"Thirty\",\"Forty\",\"Fifty\",\"Sixty\",\n\"Seventy\",\"Eighty\",\"Ninety\"]\nteen=[\"Ten\",\"Eleven\",\"Twelve\",\"Thirteen\",\"Fourteen\",\n\"Fifteen\",\"Sixteen\",\"Seventeen\",\"Eighteen\",\"Nineteen\"]\ndef ones(x):\n\tif x==0:\n\t\treturn \"\"\n\telse:\n\t\treturn one[x-1]\ndef tens(x):\n\tif x==0:\n\t\treturn \"\"\n\telse:\n\t\treturn ten[x-2]\ndef teens(x):\n\treturn teen[x]\ndef hundreds(x):\n\tif x==0:\n\t\treturn \"\"\n\telse:\n\t\treturn one[x-1]+\" Hundred\"\ndef thousands(x):\n\tif x==0:\n\t\treturn \"\"\n\telse:\n\t\treturn one[x-1]+\" Thousand\"\nnumber=int(input(\"Please enter any number: \"))\nlength=len(str(number))\nanswer=\"\"\ndef answerones():\n\tglobal answer\n\tanswer=answer+ones(number%10)+\" \"\ndef answertens(x):\n\tglobal answer\n\tanswer=answer+tens(x)+\" \"\ndef answerteens(x):\n\tglobal answer\n\tanswer=answer+teens(x)+\" \"\ndef answerhundreds(x):\n\tglobal answer\n\tanswer+=hundreds(x)+\" and \"\ndef answerthousands(x):\n\tglobal answer\n\tanswer+=thousands(x)+\" \"\ndef numberones():\n\treturn number%10\nif length==1:\n\tif number==0:\n\t\tprint(\"Zero\")\n\telse:\n\t\tprint(ones(number))\nif length==2:\n\tt=int(number/10)\n\tif t==1:\n\t\tanswerteens(numberones())\n\telse:\n\t\tanswertens(t)\n\t\tanswerones()\n\tprint(answer)\nif length==3:\n\tt=int(number/10)%10\n\tif t==0 and numberones()==0:\n\t\tanswer+=hundreds(int(number/100))\n\telse:\n\t\tanswerhundreds(int(number/100))\n\tif t==1:\n\t\tanswerteens(numberones())\n\telse:\n\t\tanswertens(t)\n\t\tanswerones()\n\tprint(answer)\nif length==4:\n\tt=(int(number/10))%10\n\tif t==0 and numberones()==0:\n\t\tanswer+=thousands(int(number/1000))+\" \"\n\telse:\n\t\tanswerthousands(int(number/1000))\n\tif t==0 and numberones()==0:\n\t\tanswer+=hundreds((int(number/100))%10)\n\telse:\n\t\tanswerhundreds((int(number/100))%10)\n\tif t==1:\n\t\tanswerteens(numberones())\n\telse:\n\t\tanswertens(t)\n\t\tanswerones()\n\tprint(answer)","sub_path":"shafeeq/numbersaswords.py","file_name":"numbersaswords.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"636246550","text":"from __future__ import print_function\nimport os\n\nimport numpy as np\n\nimport lsst.sims.photUtils.Bandpass as Bandpass\nimport lsst.sims.photUtils.Sed as Sed\n\nfrom lsst.sims.utils import angularSeparation\nfrom lsst.sims.utils import ModifiedJulianDate\nfrom lsst.sims.utils import ObservationMetaData\nfrom lsst.sims.coordUtils import chipNameFromRaDecLSST\nfrom lsst.utils import getPackageDir\n\nfrom lsst.sims.maf.db import OpsimDatabase\n\n__all__ = ['LsstObs']\n\nclass LsstObs(object):\n \"\"\"\n Class to generate actual LSST observations of a set of moving 
objects.\n Currently uses Chebyshev polynomial fits; should allow simple linear interpolation too.\n \"\"\"\n def __init__(self, logfile='obslog', cameraFootprint=True):\n self.logfile = open(logfile, 'w')\n # Set up camera object (used for footprint).\n self.epoch = 2000.0\n if cameraFootprint:\n self.fov = 2.1\n else:\n self.fov = 1.75\n self.colors = {}\n\n def setupFilters(self, filterDir=None, vDir=None,\n filterlist=('u', 'g', 'r', 'i', 'z', 'y')):\n \"\"\"\n Read LSST and Harris (V) filters.\n\n Parameters\n ----------\n filterDir : str (opt)\n Directory containing the filter throughput curves ('total_*.dat')\n Default set by 'LSST_THROUGHPUTS_BASELINE' env variable.\n vDir : str (opt)\n Directory containing the V band throughput curve.\n Default None = $SIMS_MOVINGOBJECTS_DIR/data.\n filterlist : list (opt)\n List containing the filter names to use to calculate colors.\n Default ('u', 'g', 'r', 'i', 'z', 'y')\n \"\"\"\n if filterDir is None:\n filterDir = os.getenv('LSST_THROUGHPUTS_BASELINE')\n if filterDir is None:\n raise ValueError('Please set filterDir or env variable LSST_THROUGHPUTS_BASELINE')\n if vDir is None:\n vDir = os.path.join(getPackageDir('SIMS_MOVINGOBJECTS'), 'data')\n self.filterlist = filterlist\n # Read filter throughput curves from disk.\n self.lsst = {}\n for f in self.filterlist:\n self.lsst[f] = Bandpass()\n self.lsst[f].readThroughput(os.path.join(filterDir, 'total_' + f + '.dat'))\n self.vband = Bandpass()\n self.vband.readThroughput(os.path.join(vDir, 'harris_V.dat'))\n\n def calcColors(self, sedname='C.dat', sedDir=None):\n \"\"\"\n Calculate the colors for a given SED, store the result.\n\n Parameters\n ----------\n sedname : str (opt)\n Name of the SED. Default 'C.dat'.\n sedDir : str (opt)\n Directory containing the SEDs of the moving objects.\n Default None = $SIMS_MOVINGOBJECTS_DIR/data.\n \"\"\"\n # See if the sed's colors are in memory already.\n if sedname not in self.colors:\n if sedDir is None:\n sedDir = os.path.join(getPackageDir('SIMS_MOVINGOBJECTS'), 'data')\n moSed = Sed()\n moSed.readSED_flambda(os.path.join(sedDir, sedname))\n vmag = moSed.calcMag(self.vband)\n self.colors[sedname] = {}\n for f in self.filterlist:\n self.colors[sedname][f] = moSed.calcMag(self.lsst[f]) - vmag\n return\n\n def calcMagLosses(self, velocity, seeing, texp=30.):\n \"\"\"\n Calculate the magnitude losses due to trailing and not matching the point-source detection filter.\n \"\"\"\n a_trail = 0.76\n b_trail = 1.16\n a_det = 0.42\n b_det = 0.00\n x = velocity * texp / seeing / 24.0\n dmagTrail = 1.25 * np.log10(1 + a_trail*x**2/(1+b_trail*x))\n dmagDetect = 1.25 * np.log10(1 + a_det*x**2 / (1+b_det*x))\n return dmagTrail, dmagDetect\n\n def readOpsim(self, opsimfile, constraint=None, dbcols=None, expMJDCol='expMJD'):\n # Read opsim database.\n opsdb = OpsimDatabase(opsimfile)\n if dbcols is None:\n dbcols = []\n # Be sure the minimum columns that we need are in place.\n # reqcols = ['expMJD', 'night', 'fieldRA', 'fieldDec', 'rotSkyPos', 'filter',\n # 'visitExpTime', 'finSeeing', 'fiveSigmaDepth', 'solarElong']\n reqcols = [expMJDCol, 'night', 'fieldRA', 'fieldDec', 'rotSkyPos', 'filter',\n 'visitExpTime', 'FWHMeff', 'FWHMgeom', 'fiveSigmaDepth', 'solarElong']\n for col in reqcols:\n if col not in dbcols:\n dbcols.append(col)\n simdata = opsdb.fetchMetricData(dbcols, sqlconstraint=constraint)\n print(\"Queried data from opsim %s, fetched %d visits.\" % (opsimfile, len(simdata[expMJDCol])),\n file=self.logfile)\n return simdata\n\n def _openOutput(self, 
outfileName):\n self.outfile = open(outfileName, 'w')\n self.wroteHeader = False\n\n\n #REWRITE\n def writeObs(self, objId, interpfuncs, simdata, idxObs, outfileName='out.txt',\n sedname='C.dat', tol=1e-8,\n seeingCol='FWHMgeom', expTimeCol='visitExpTime'):\n \"\"\"\n Call for each object; write out the observations of each object.\n \"\"\"\n # Return if there's nothing to write out.\n if len(idxObs) == 0:\n return\n # Open file if needed.\n try:\n self.outfile\n except AttributeError:\n self._openOutput(outfileName)\n # Calculate the ephemerides for the object, using the interpfuncs, for the times in simdata[idxObs].\n tvis = simdata['expMJD'][idxObs]\n ephs = np.recarray([len(tvis)], dtype=([('delta', '\n# print(reurl['geocodes'][0]['location']) ## print the coordinates\n# return reurl\n if reurl['geocodes'][0]['location']:\n return reurl['geocodes'][0]['location']\n else:\n return None\n## test here:\n## should return the following format:\n#{\"status\":\"1\",\"info\":\"OK\",\"infocode\":\"10000\",\"count\":\"1\",\"geocodes\":[{\"formatted_address\":\"北京市东城区天安门\",\"country\":\"中国\",\"province\":\"北京市\",\"citycode\":\"010\",\"city\":\"北京市\",\"district\":\"东城区\",\"township\":[],\"neighborhood\":{\"name\":[],\"type\":[]},\"building\":{\"name\":[],\"type\":[]},\"adcode\":\"110101\",\"street\":[],\"number\":[],\"location\":\"116.397477,39.908692\",\"level\":\"兴趣点\"}]}\n\n#@@@@@@@@@@@@@@@@ 2. Walking route planning @@@@@@@@@@@@@@@@@@@@@\n\n\ndef walkRoute(beg_lo,end_lo):\n urlcode=BASIC_URL+\"direction/walking?key=\"+API_KEY+\"&origin=\"+beg_lo+\"&destination=\"+end_lo\n rt=requests.get(urlcode)\n reurl=rt.json()\n restr=[]\n for paths in reurl[\"route\"][\"paths\"][0][\"steps\"]:\n restr.append(str(paths[\"instruction\"]))\n return restr\n\n#@@@@@@@@@@@@@@@@ 3. Transit route planning @@@@@@@@@@@@@@@@@@@@@\n\ntransMod=0 ## 0: fastest; 1: cheapest; 2: fewest transfers; 3: least walking; 5: no subway\n\ndef transitRoute(beg_lo,end_lo,city,cityd):\n urlcode=BASIC_URL+\"direction/transit/integrated?key=\"+API_KEY+\"&origin=\"+beg_lo+\"&destination=\"+end_lo+\"&city=\"+city+\"&cityd=\"+cityd+\"&strategy=\"+str(transMod)\n rt=requests.get(urlcode)\n rejson=rt.json()\n# fo = open(origin+\"->\"+destination+\".json\", \"w\")\n# fo.write(r)\n# fo.close()\n pp=[]\n\n for meths in rejson[\"route\"][\"transits\"]:\n qq=[]\n for seg in meths[\"segments\"]:\n remths={}\n for key in seg.keys():\n road=\"\"\n \n if key==\"walking\":\n for ro in seg[key][\"steps\"]:\n road+=\"\"\n road+=ro[\"instruction\"]\n remths[key]=road\n if key==\"bus\":\n name=\"\"\n way=\"\"\n for mm in seg[key][\"buslines\"]:\n for nn in mm.keys():\n if nn==\"departure_stop\":\n \n way+=mm[nn][\"name\"]\n way+=\" --> \"\n elif nn==\"arrival_stop\":\n way+=mm[nn][\"name\"]\n way+=\": \"\n elif nn==\"name\":\n name+=\"\".join(mm[nn])\n road+=\"\"\n road=way+name\n remths[key]=road\n if remths:\n qq.append(remths)\n pp.append(qq)\n return pp\n\n ","sub_path":"request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":8786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"99475538","text":"from PyQt5 import QtCore, QtWidgets\n\n\nclass CustomLayout(QtWidgets.QLayout):\n \"\"\" A QLayout on which you can place widgets at the middle left, top left, top center, top right, middle right, bottom right,\n bottom center, bottom left and middle center positions.\n\n Args:\n margin (tuple): tuple of int values setting the (left, top, right, bottom) margins\n Default: (0, 0, 0, 0)\n\n spacing (int): spacing between the sections\n Default: (-1)\n Examples:\n # >>> from PyQt5 import QtWidgets\n # >>> class 
Window(QtWidgets.QWidget):\n # >>> def __init__(self):\n # >>> super().__init__()\n # >>> layout = CustomLayout(margin=(20, 20, 20, 20), spacing=50)\n # >>> bt = QtWidgets.QPushButton(\"Button\", self)\n # >>> layout.addWidget(bt, CustomLayout.BOTTOM_CENTER)\n # >>>\n # >>> label = QtWidgets.QLabel(\"Custom label\", self)\n # >>> layout.addWidget(label, CustomLayout.TOP_RIGHT)\n # >>>\n # >>> widget = QtWidgets.QWidget(self)\n # >>> horizontal_layout = QtWidgets.QHBoxLayout()\n # >>> bt_2 = QtWidgets.QPushButton(\"Button 2\", self)\n # >>> horizontal_layout.addWidget(bt_2)\n # >>> bt_3 = QtWidgets.QPushButton(\"Button 3\", self)\n # >>> horizontal_layout.addWidget(bt_3)\n # >>> widget.setLayout(horizontal_layout)\n # >>> layout.addLayout(widget)\n # >>>\n # >>> # Set the layout on window\n # >>> self.setLayout(self.layout)\n #\n \"\"\"\n\n TOP_LEFT, TOP_CENTER, TOP_RIGHT, BOTTOM_LEFT, BOTTOM_CENTER, BOTTOM_RIGHT, MIDDLE_LEFT, MIDDLE_CENTER\\\n , MIDDLE_RIGHT, BOTTOM = range(10)\n MINIMUM_SIZE, SIZE_HINT = range(2)\n\n def __init__(self, margin=(0, 0, 0, 0), spacing=-1, parent=None):\n super().__init__(parent)\n assert isinstance(margin, tuple) and len(margin) == 4 and all(isinstance(m, int) for m in margin), TypeError(\n \"margin should be a tuple with 4 values in int: margin = (left, top, right, bottom)\")\n\n self.margin_left, self.margin_top, self.margin_right, self.margin_bottom = margin\n\n self.setSpacing(spacing)\n self.list = []\n\n def setMenuBar(self, QWidget):\n self.margin_top += 5\n super().setMenuBar(QWidget)\n\n def __del__(self):\n l = self.takeAt(0)\n while l:\n l = self.takeAt(0)\n\n def addItem(self, item):\n self.add(item, CustomLayout.MIDDLE_LEFT)\n\n def addWidget(self, widget, position):\n self.add(QtWidgets.QWidgetItem(widget), position)\n\n def expandingDirections(self):\n return QtCore.Qt.Horizontal | QtCore.Qt.Vertical\n\n def hasHeightForWidth(self):\n return False\n\n def count(self):\n return len(self.list)\n\n def itemAt(self, index):\n if index < len(self.list):\n return self.list[index].item\n\n return None\n\n def minimumSize(self):\n return self.calculateSize(CustomLayout.MINIMUM_SIZE)\n\n def setGeometry(self, rect):\n center = None\n right_width = self.margin_right\n left_width = self.margin_left\n top_left_height = self.margin_top\n top_center_height = self.margin_top\n top_right_height = self.margin_top\n top_height = self.margin_top\n bottom_left_height = self.margin_bottom\n bottom_center_height = self.margin_bottom\n bottom_right_height = self.margin_bottom\n bottom_height = self.margin_bottom\n\n super().setGeometry(rect)\n\n for wrapper in self.list:\n item = wrapper.item\n position = wrapper.position\n\n # Divide the window width By 3\n _item_width_p = rect.width() / 3\n _item_width = item.sizeHint().width()\n\n if position == CustomLayout.TOP_CENTER:\n # get X position is the x position of window + the item width\n _x_position = rect.x() + _item_width_p + (_item_width_p - _item_width) / 2\n\n # Set the geometry of the widget\n item.setGeometry(QtCore.QRect(_x_position, top_center_height, _item_width, item.sizeHint().height()))\n\n # Get the current top center height\n top_center_height += item.geometry().height() + self.spacing()\n\n # Get the larger top height\n top_height = top_center_height if top_center_height > top_height else top_height\n\n elif position == CustomLayout.TOP_LEFT:\n # get X position is the x position of window\n _x_position = rect.x() + self.margin_left\n\n # Set the geometry of the widget\n 
item.setGeometry(QtCore.QRect(_x_position, top_left_height, _item_width, item.sizeHint().height()))\n\n # Get the current top center height\n top_left_height += item.geometry().height() + self.spacing()\n\n # Get the larger top height\n top_height = top_left_height if top_left_height > top_height else top_height\n\n elif position == CustomLayout.TOP_RIGHT:\n # get X position is the x position of window + 2 * the item width\n _x_position = rect.x() + 2 * _item_width_p\n _x_position += _item_width_p - _item_width - self.margin_right\n\n # Set the geometry of the widget\n item.setGeometry(QtCore.QRect(_x_position, top_right_height, _item_width, item.sizeHint().height()))\n\n # Get the current top right height\n top_right_height += item.geometry().height() + self.spacing()\n\n # Get the larger top height\n top_height = top_right_height if top_right_height > top_height else top_height\n\n elif position == CustomLayout.BOTTOM_LEFT:\n # get X position is the x position of window\n _x_position = rect.x() + self.margin_left\n\n # Set the abstract geometry position for get its height to define its Y position correctly\n item.setGeometry(\n QtCore.QRect(item.geometry().x(), item.geometry().y(), rect.width(), item.sizeHint().height()))\n\n # Get the current bottom left height\n bottom_left_height += item.geometry().height() + self.spacing()\n\n # Get the Y position\n _y_position = rect.y() + rect.height() - bottom_left_height + self.spacing()\n\n # Get the larger bottom height\n bottom_height = bottom_left_height if bottom_left_height > bottom_height else bottom_height\n\n # Set the geometry of the widget in the correct position and width\n item.setGeometry(QtCore.QRect(_x_position, _y_position, _item_width, item.geometry().height()))\n\n elif position == CustomLayout.BOTTOM_CENTER:\n # get X position is the x position of window + its width\n _x_position = rect.x() + _item_width_p + (_item_width_p - _item_width) / 2\n\n # Set the abstract geometry position for get its height to define its Y position correctly\n item.setGeometry(\n QtCore.QRect(item.geometry().x(), item.geometry().y(), rect.width(), item.sizeHint().height()))\n\n # Get the current bottom center height\n bottom_center_height += item.geometry().height() + self.spacing()\n\n # Get the Y position\n _y_position = rect.y() + rect.height() - bottom_center_height + self.spacing()\n\n # Get the larger bottom height\n bottom_height = bottom_center_height if bottom_center_height > bottom_height else bottom_height\n\n # Set the geometry of the widget in the correct position and width\n item.setGeometry(QtCore.QRect(_x_position, _y_position, _item_width, item.geometry().height()))\n\n elif position == CustomLayout.BOTTOM_RIGHT:\n # get X position is the x position of window + its 2 * width\n _x_position = rect.x() + 2 * _item_width_p\n _x_position += _item_width_p - _item_width - self.margin_right\n\n # Set the abstract geometry position for get its height to define its Y position correctly\n item.setGeometry(\n QtCore.QRect(item.geometry().x(), item.geometry().y(), rect.width(), item.sizeHint().height()))\n\n # Get the Y position\n bottom_right_height += item.geometry().height() + self.spacing()\n\n # Get the Y position\n _y_position = rect.y() + rect.height() - bottom_right_height + self.spacing()\n\n # Get the larger bottom height\n bottom_height = bottom_right_height if bottom_right_height > bottom_height else bottom_height\n\n # Set the geometry of the widget in the correct position and width\n item.setGeometry(QtCore.QRect(_x_position, 
_y_position, _item_width, item.geometry().height()))\n\n elif position == CustomLayout.MIDDLE_CENTER:\n # Center is the its item because its a special case and will be treated below\n center = wrapper\n\n elif position == CustomLayout.BOTTOM:\n # get X position is the x position of window\n _x_position = rect.x() + self.margin_left\n\n # Set the abstract geometry position for get its height to define its Y position correctly\n item.setGeometry(\n QtCore.QRect(item.geometry().x(), item.geometry().y(), rect.width(), item.sizeHint().height()))\n\n # Get the current bottom left height\n bottom_left_height += item.geometry().height() + self.spacing()\n\n # Get the Y position\n _y_position = rect.y() + rect.height() - bottom_left_height + self.spacing()\n\n # Get the larger bottom height\n bottom_height = bottom_left_height if bottom_left_height > bottom_height else bottom_height\n\n # Set the geometry of the widget in the correct position and width\n item.setGeometry(QtCore.QRect(_x_position, _y_position, rect.width() - self.spacing(), item.geometry().height()))\n\n # Get the center height, its item height - top height - bottom height\n center_height = rect.height() - top_height - bottom_height\n\n for wrapper in self.list:\n # Now, the center_height was declared and the middle and center positions can be defined\n item = wrapper.item\n position = wrapper.position\n\n if position == CustomLayout.MIDDLE_LEFT:\n # Get the X position\n _x_position = rect.x() + left_width + self.margin_left\n\n # Set the item in the left geometry\n item.setGeometry(QtCore.QRect(_x_position, top_height, item.sizeHint().width(), center_height))\n\n # get the left width\n left_width += item.geometry().width() + self.spacing()\n\n elif position == CustomLayout.MIDDLE_RIGHT:\n # Set on abstract geometry for get necessary dimensions\n item.setGeometry(QtCore.QRect(item.geometry().x(),\n item.geometry().y(), item.sizeHint().width(),\n center_height))\n\n # Get its width\n right_width += item.geometry().width() + self.spacing() + self.margin_right\n\n # Get the X position\n _x_position = rect.x() + rect.width() - right_width + self.spacing()\n\n # Set in the correct position\n item.setGeometry(QtCore.QRect(_x_position, top_height, item.geometry().width(),\n item.geometry().height()))\n\n if center:\n # Get the center width\n center_width = rect.width() - right_width - left_width\n\n # Set the center geometry\n center.item.setGeometry(QtCore.QRect(left_width, top_height, center_width, center_height))\n\n def sizeHint(self):\n return self.calculateSize(CustomLayout.SIZE_HINT)\n\n def takeAt(self, index):\n if index >= 0 and index < len(self.list):\n layoutStruct = self.list.pop(index)\n return layoutStruct.item\n\n return None\n\n def add(self, item, position):\n self.list.append(ItemWrapper(item, position))\n\n def calculateSize(self, sizeType):\n totalSize = QtCore.QSize()\n\n for wrapper in self.list:\n position = wrapper.position\n itemSize = QtCore.QSize()\n\n if sizeType == CustomLayout.MINIMUM_SIZE:\n itemSize = wrapper.item.minimumSize()\n else: # sizeType == BorderLayout.SizeHint\n itemSize = wrapper.item.sizeHint()\n\n if position in (CustomLayout.TOP_CENTER, CustomLayout.BOTTOM_CENTER, CustomLayout.MIDDLE_CENTER):\n totalSize.setHeight(totalSize.height() + itemSize.height())\n\n if position in (CustomLayout.MIDDLE_LEFT, CustomLayout.MIDDLE_RIGHT, CustomLayout.MIDDLE_CENTER):\n totalSize.setWidth(totalSize.width() + itemSize.width())\n\n return totalSize\n\n\nclass ItemWrapper(object):\n \"\"\" Class to aux the 
CustomLayout Class \"\"\"\n def __init__(self, i, p):\n self.item = i\n self.position = p\n","sub_path":"old/whatsapp_controller/gui/view/util/custom_layout.py","file_name":"custom_layout.py","file_ext":"py","file_size_in_byte":13553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"370032958","text":"#!/usr/bin/env python3\n\"\"\"\nSample of JSON handling\n\"\"\"\nimport json\n\nFILE_NAME: str = 'test.json'\n\ndef save_json(data: dict):\n \"\"\"\n Write JSON\n \"\"\"\n with open(FILE_NAME, 'w') as file:\n json.dump(data, file)\ndef load_json() -> dict:\n \"\"\"\n Read JSON\n \"\"\"\n data: dict\n with open(FILE_NAME, 'r') as file:\n data = json.load(file)\n return data\n\nif __name__ == '__main__':\n # build a dictionary\n json_data: dict = dict()\n json_data['param1'] = 2019\n json_data['param2'] = 'Hello'\n json_data['param3'] = [2019, 3, 24]\n # save as json\n print(\"[save]\\n{}\".format(json_data))\n save_json(json_data)\n # load json\n json_data.clear()\n json_data = load_json()\n print(\"[load]\\n{}\".format(json_data))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"394734003","text":"import math\nimport time\nimport numpy as np\n\n\ntopicIds = [[1, 2, 3], [2, 3, 4], [1, 4], [2, 3]]\nanswerIds = [[6, 4], [1, 2], [5], [3]]\nviews = [[2, 1, 2], [6, 3, 5], [3, 3, 0], [5, 1, 1], [4, 2, 3], [1, 4, 2]]\n\nflatten = lambda l: [item for sublist in l for item in sublist]\nunique = lambda l: list(set(l))\nlimit = lambda l: l[0:min(10, len(l))] # cap at the top 10 entries\ntopic_ids_all = unique(flatten(topicIds))\nuser_ids_all = unique([item[1] for item in views])\nprint(topic_ids_all)\nprint(user_ids_all)\n\n\ntopic_to_answers = {}\ntopic_user_to_views = {}\nresult = []\nfor topic in sorted(topic_ids_all):\n topic_to_answers[topic] = flatten([answerIds[i] for i, elem in enumerate(topicIds) if topic in elem])\n views_to_answer = [elem[1:] for elem in views for answer in topic_to_answers[topic] if elem[0] == answer]\n for user_views in views_to_answer:\n key = (topic, user_views[0])\n if key not in topic_user_to_views:\n topic_user_to_views[key] = [user_views[1]]\n else:\n topic_user_to_views[key].append(user_views[1])\n\nprint(topic_to_answers)\nprint(topic_user_to_views)\ntopic_user_to_views = {k: sum(v) for k, v in topic_user_to_views.items()}\nprint(topic_user_to_views)\n\nfor topic in sorted(topic_ids_all):\n result.append([[k[1], v] for k, v in topic_user_to_views.items() if k[0] == topic])\n\n# result.sort(key=lambda ll: (lambda l: l[1])(ll))\nresult = [limit(sorted(item, key = lambda x: (x[1], -x[0]), reverse=True)) for item in result]\n# print(type(result[0].sort()))\n[print(x) for x in result]\n\n\n\n","sub_path":"bots/quorabot/quorabot_top_user.py","file_name":"quorabot_top_user.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"495215632","text":"from art_archive import app, whereCondition, g\nfrom flask_json import json_response\nimport pymysql.cursors\n\n@app.route('/artist', methods=['GET'])\ndef showArtist():\n try:\n code = None\n message = None\n\n connection = g.connection\n\n where_condition = \"\"\n where_condition = whereCondition(where_condition, \"artist_name\", \"a.name\")\n where_condition = whereCondition(where_condition, \"country\", \"a.country\")\n where_condition = whereCondition(where_condition, \"genre\", \"a.genre\")\n 
where_condition = whereCondition(where_condition, \"image_title\", \"b.title\")\n\n if where_condition == \"\":\n raise Exception(400, \"No parameter\")\n\n with connection.cursor() as cursor:\n sql = \"\"\"\n SELECT distinct a.*\n FROM artists AS a LEFT JOIN images AS b\n ON a.id = b.artist_id\n WHERE %s\n \"\"\" % where_condition\n\n cursor.execute(sql)\n result = cursor.fetchall()\n\n except pymysql.Error as e:\n print(\"Error message: {}\".format(e.args[1]))\n code = 400\n message = e.args[1]\n connection.close()\n\n except Exception as e:\n print(\"Error message: {}\".format(e.args[1]))\n code = e.args[0]\n message = \"Please type search condition\"\n\n finally:\n if code is not None:\n return json_response(status_=code, message=message)\n\n connection.close()\n\n return json_response(status_=200, artists=result)\n\n","sub_path":"art_archive/showArtist.py","file_name":"showArtist.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"269083471","text":"#paddle game\n\"\"\"\nto do:\nmake a high score list?\n\"\"\"\n\nfrom tkinter import *\nimport time, random, pickle, paddleGameInit\n\npaddleGameInit.run('dontDeleteMe!!!')\n\ntk = Tk()\ncanvas = Canvas(tk, width = 700, height = 500)\n\ntk.title(\"Paddle Game\")\n\ngameMode = input(\"high score challenge? (y, n): \")\n\ndef endGame(winner):\n canvas.create_text(350, 250, text = \"Player \" + str(winner) + \" Wins!!!\")\n\nclass Player0:\n def __init__(self, canvas, color):\n self.canvas = canvas\n self.id = self.canvas.create_polygon((0, 0), (0, 70), (10, 70), (10, 0), fill = color)\n self.canvas.move(self.id, 650, 250)\n self.x = 0\n self.y = 0\n self.canvas.bind_all(\"\", self.moveUp)\n self.canvas.bind_all(\"\", self.moveDown)\n self.canvasHeight = 500\n def move(self):\n self.canvas.move(self.id, self.x, self.y)\n pos = self.canvas.coords(self.id)\n if pos[1] <= 0:\n self.y = 0\n if pos[3] >= self.canvasHeight:\n self.y = 0\n def moveDown(self, evt):\n self.y = 3\n def moveUp(self, evt):\n self.y = -3\n\nclass Player1:\n def __init__(self, canvas, color):\n self.canvas = canvas\n self.id = self.canvas.create_polygon((0, 0), (0, 70), (10, 70), (10, 0), fill = color)\n self.canvas.move(self.id, 50, 250)\n self.x = 0\n self.y = 0\n self.canvas.bind_all(\"\", self.moveUp)\n self.canvas.bind_all(\"\", self.moveDown)\n self.canvasHeight = 500\n def move(self):\n self.canvas.move(self.id, self.x, self.y)\n pos = self.canvas.coords(self.id)\n if pos[1] <= 0:\n self.y = 0\n if pos[3] >= self.canvasHeight:\n self.y = 0\n def moveDown(self, evt):\n self.y = 3\n def moveUp(self, evt):\n self.y = -3\n\nclass Ball:\n def __init__(self, canvas, player0, player1, mode, color):\n self.canvas = canvas\n self.paddle0 = player0\n self.paddle1 = player1\n self.id = self.canvas.create_oval((0, 0), (25, 25), fill = color)\n self.canvas.move(self.id, 350, 250)\n self.starts = [-2, -1, 0, 1, 2]\n random.shuffle(self.starts)\n self.x = 2\n self.y = self.starts[1]\n self.canvasHeight = 500\n self.canvasWidth = 700\n self.winner = None\n self.leftScore = 0\n self.rightScore = 0\n if mode in (\"No\", \"no\", \"N\", \"n\"):\n self.winScore = 9999\n else:\n self.winScore = 3\n def hitPaddle(self, pos):\n paddlePos = self.canvas.coords(self.paddle0.id)\n if pos[2] >= paddlePos[0] and pos[0] <= paddlePos[2]:\n if pos[3] >= paddlePos[1] and pos[3] <= paddlePos[3]:\n return True\n paddlePos = self.canvas.coords(self.paddle1.id)\n if pos[2] >= paddlePos[0] and pos[0] <= 
paddlePos[2]:\n if pos[3] >= paddlePos[1] and pos[3] <= paddlePos[3]:\n return True\n return False\n def move(self):\n pos = self.canvas.coords(self.id)\n random.shuffle(self.starts)\n if pos[1] <= 0:\n self.y = 2\n if pos[3] >= self.canvasHeight:\n self.y = -2\n if self.hitPaddle(pos) == True:\n self.y = self.starts[1]\n self.x = self.starts[0]\n if pos[0] <= 0:\n self.x = self.starts[0]\n if self.x < 0:\n self.x = self.x * -1\n self.leftScore += 1 #comment this line to make the left side invincible\n if pos[2] >= self.canvasWidth:\n self.x = self.starts[0]\n if self.x > 0:\n self.x = self.x *-1\n self.rightScore += 1 #comment this line to make the right side invincible\n self.canvas.move(self.id, self.x, self.y)\n def end(self):\n if self.rightScore >= self.winScore:\n self.winner = 1\n if self.leftScore >= self.winScore:\n self.winner = 0\n\n#init window\n\ncanvas.pack()\ntk.update_idletasks()\ntk.update()\n\ntime.sleep(1)\n\n#init sprites\n\nplayer0 = Player0(canvas, 'red')\nplayer1 = Player1(canvas, 'blue')\nball = Ball(canvas, player0, player1, str(gameMode), 'yellow')\n\nrightScore = canvas.create_text(75, 50, text = str(ball.rightScore))\nleftScore = canvas.create_text(625, 50, text = str(ball.leftScore))\n\n#\n\ndef end(evt):\n if ball.rightScore > ball.leftScore:\n ball.winner = 1\n else:\n ball.winner = 0\n\ncanvas.bind_all(\"\", end)\n\n#main loop\n\nwhile ball.winner == None:\n canvas.itemconfig(rightScore, text = str(ball.rightScore))\n canvas.itemconfig(leftScore, text = str(ball.leftScore))\n ball.end()\n player0.move()\n player1.move()\n ball.move()\n tk.update_idletasks()\n tk.update()\n time.sleep(0.01)\n\n\nendGame(ball.winner)\n\nif ball.winner == 1:\n score = ball.rightScore\nelse:\n score = ball.leftScore\n\nx = True\nfile = open(\"highScores.dat\", mode = 'rb')\nscores = pickle.load(file)\nfile.close()\nscores.append(score)\nscores.sort(reverse = True)\nwhile x == True:\n if len(scores) >=6:\n scores.pop()\n else:\n x = False\nfile = open(\"highScores.dat\", mode = 'wb')\npickle.dump(scores, file)\nfile.close()\nprint(scores)\n\n","sub_path":"final/paddleGame.py","file_name":"paddleGame.py","file_ext":"py","file_size_in_byte":5238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"627881842","text":"import flask\nimport os\nimport time\nfrom flask import Flask, render_template, jsonify\n\nhealth = True\nready = True\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\ndef current_milli_time():\n return round(time.time() * 1000)\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n@app.route('/health')\ndef healthcheck():\n if health:\n return render_template(\"health.html\")\n else:\n return 'bad request!', 400\n\n@app.route('/sethealth')\ndef sethealth():\n global health\n health = False\n chopstick = {\n 'health': health\n }\n return jsonify(chopstick)\n\n@app.route('/ready')\ndef readycheck():\n if ready:\n return render_template(\"health.html\")\n else:\n return 'bad request!', 400\n\n@app.route('/setunready/')\ndef setunready(time_unv):\n timer = current_milli_time()\n global ready\n ready = False\n chopstick = {\n 'ready': ready\n }\n while current_milli_time() < timer + time_unv*1000:\n ready = False\n ready = True\n return jsonify(chopstick)\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", debug=True, 
port=\"5000\")","sub_path":"appidade/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"35575109","text":"\"\"\"\nFlask web server.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nimport json\nimport os\nimport random\nimport hashlib\nimport requests\nimport base64\nimport attr\nimport time\nimport pathlib\nfrom glob import glob\nfrom urllib.parse import unquote_plus\nfrom typing import Dict, Optional\nfrom Crypto.Cipher import AES\nfrom PIL import Image\n\nfrom flask import Flask, render_template, jsonify, request, url_for\nfrom flask import session, current_app, redirect, make_response, Response, send_file\nfrom flask_session import Session\nfrom flask_login import LoginManager, login_required, login_user, current_user, logout_user\nfrom flask_pymongo import PyMongo\nfrom bson import json_util\nfrom werkzeug.utils import secure_filename\n\nfrom annotation_tools import default_config as cfg\nfrom annotation_tools.config import default\n\nimport sys\nsys.path.append('config/default.py')\n\napp = Flask(__name__)\n\napp.config.from_object('annotation_tools.config.default')\n# app.config['GOOGLE_CLIENT_ID'] = os.environ.get(\"GOOGLE_CLIENT_ID\", default=\"120971085062-trbgdnaksj7tttjdivmqfeb8jk360949.apps.googleusercontent.com\")\n# app.config['GOOGLE_CLIENT_SECRET'] = os.environ.get(\"GOOGLE_CLIENT_SECRET\", default=\"yq2vVwkgEsLqOoZkCO9uTbR7\")\n# app.config['HOSTNAME'] = os.environ.get(\"HOSTNAME\", default=\"http://localhost:8008\")\n# app.config['SECRET_KEY'] = os.environ.get(\"SECRET_KEY\", default=\"super-secret\")\napp.config['SESSION_TYPE'] = 'filesystem'\n# app.secret_key = \"super-secret\"\n#app.config.from_object('annotation_tools.default_config')\napp.config['MONGO_URI'] = 'mongodb://'+cfg.MONGO_HOST+':'+str(cfg.MONGO_PORT)+'/'+cfg.MONGO_DBNAME\n# if 'VAT_CONFIG' in os.environ:\n# app.config.from_envvar('VAT_CONFIG')\n\nmongo = PyMongo(app)\n\nSession(app)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n@attr.s\nclass User(object):\n id = attr.ib()\n name = attr.ib()\n email = attr.ib()\n editingBatchId = attr.ib()\n is_authenticated = True\n is_active = True\n is_anonymous = False\n\n def get_id(self):\n return self.id\n\nusers: Dict[str, User] = {}\n\n@login_manager.unauthorized_handler\ndef unauthorized_callback():\n return redirect('/')\n\n@login_manager.user_loader\ndef load_user(user_id) -> Optional[User]:\n app.logger.debug('looking for user %s', user_id)\n u = users.get(user_id, None)\n if not id:\n return None\n return u\n\ndef generate_nonce(length=8):\n \"\"\"Generate pseudorandom number.\"\"\"\n return ''.join([str(random.randint(0, 9)) for i in range(length)])\n\ndef get_db():\n \"\"\" Return a handle to the database\n \"\"\"\n with app.app_context():\n db = mongo.db\n return db\n\n############### Dataset Utilities ###############\n\n@app.route('/')\ndef home():\n if current_user.is_authenticated:\n return redirect(url_for('dashboard'))\n else:\n return render_template('login.html')\n\n@app.route(\"/dashboard\")\n@login_required\ndef dashboard():\n if current_user.email in app.config[\"ADMIN_EMAIL\"]:\n is_admin = True\n else:\n is_admin = False\n data = mongo.db.user.find_one({'id' : current_user.id})\n if not data:\n data = {\n \"verified\": \"not verified\"\n }\n else:\n if \"verified\" not in data:\n data[\"verified\"] = 
\"pending\"\n return render_template('dashboard.html', verified = data[\"verified\"], is_admin = is_admin)\n\n@app.route(\"/setting\")\n# @login_required\ndef setting():\n if current_user.email in app.config[\"ADMIN_EMAIL\"]:\n return redirect(url_for('admin'))\n\n data = mongo.db.user.find_one({'id' : current_user.id})\n if not data:\n data = {\n \"verified\": \"not verified\"\n }\n else:\n if \"verified\" not in data:\n data[\"verified\"] = \"pending\"\n return render_template('setting.html', verified = data[\"verified\"])\n\n@app.route(\"/salary\")\n# @login_required\ndef salary():\n if current_user.email in app.config[\"ADMIN_EMAIL\"]:\n is_admin = True\n else:\n is_admin = False\n return render_template('salary.html', is_admin = is_admin)\n\n@app.route(\"/admin\")\n# @login_required\ndef admin():\n print(\"admin\")\n return render_template('admin.html')\n\n@app.route('/users/all', methods=['GET'])\ndef get_users():\n data = []\n records = mongo.db.user.find({})\n for record in records:\n data.append(record)\n return json.dumps(data, default=str)\n\n@app.route('/salary/user', methods=['GET'])\ndef get_salary():\n if current_user.email in app.config[\"ADMIN_EMAIL\"]:\n return redirect(url_for('get_salaries'))\n userId = current_user.id\n data = []\n records = mongo.db.salary.find({\"userId\": userId})\n for record in records:\n data.append(record)\n return json.dumps(data, default=str)\n\n@app.route('/salaries/all', methods=['GET'])\ndef get_salaries():\n data = []\n records = mongo.db.salary.find({})\n for record in records:\n data.append(record)\n return json.dumps(data, default=str)\n\n@app.route('/user/verify', methods=['PUT'])\ndef verify_user():\n userId = request.args.get('id')\n verify = request.args.get('verify')\n\n mongo.db.user.update_one({'id' : userId}, {'$set' :{'verified': verify}})\n return \"\"\n\n@app.route('/info/update', methods=['POST'])\ndef update_info():\n info = json_util.loads(json.dumps(request.json['info']))\n\n return \"\"\n\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n r = requests.get('https://accounts.google.com/Logout')\n # return redirect(url_for('home'))\n return redirect(\"https://www.google.com/accounts/Logout?continue=https://appengine.google.com/_ah/logout?continue=\" + current_app.config[\"HOSTNAME\"])\n\n@app.route(\"/login\", methods=['GET'])\ndef login() -> Response:\n # 1. Create an anti-forgery state token\n state = hashlib.sha256(os.urandom(1024)).hexdigest()\n session['state'] = state\n\n nonce = generate_nonce()\n session['nonce'] = nonce\n\n # 2. Send an authentication request to Google\n payload = {\n 'client_id': current_app.config[\"GOOGLE_CLIENT_ID\"],\n 'response_type': 'code',\n 'scope': 'openid email profile',\n 'redirect_uri': current_app.config[\"HOSTNAME\"]+'/callback',\n 'state': state,\n 'nonce': nonce,\n }\n r = requests.get('https://accounts.google.com/o/oauth2/v2/auth?', payload)\n\n # app.logger.debug('session id is %s', session.sid)\n print('session id is %s', session.sid)\n\n return redirect(r.url)\n\n@app.route(\"/callback\", methods=['GET'])\ndef callback() -> Response:\n print(\"callback\")\n # 3. Confirm anti-forgery state token\n if request.args.get('state', '') != session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # 4. 
Exchange code for access token and ID token\n code = request.args.get('code', '')\n payload = {\n 'code': code,\n 'client_id': current_app.config[\"GOOGLE_CLIENT_ID\"],\n 'client_secret': current_app.config[\"GOOGLE_CLIENT_SECRET\"],\n 'redirect_uri': current_app.config[\"HOSTNAME\"]+'/callback',\n 'grant_type': 'authorization_code',\n }\n\n endpoint = 'https://www.googleapis.com/oauth2/v4/token'\n\n r = requests.post(endpoint, payload)\n if r.status_code != requests.codes.ok:\n response = make_response(json.dumps('Got error from Google.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n id_token = r.json()['id_token']\n\n # 5. Obtain user information from the ID token\n jwt = id_token.split('.')\n jwt_payload = json.loads(base64.b64decode(jwt[1] + \"===\"))\n\n if jwt_payload['nonce'] != session.pop('nonce', ''):\n response = make_response(json.dumps('Invalid nonce.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n if jwt_payload['iss'] != 'https://accounts.google.com':\n response = make_response(json.dumps('Invalid issuer.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n print(jwt_payload)\n user_id = 'google-' + jwt_payload['sub']\n print(\"user id = \" + user_id)\n\n u = User(user_id, jwt_payload['name'], jwt_payload['email'], -1)\n\n # Automatically add users to DB (a dict).\n users[user_id] = u\n\n login_user(u)\n\n response = make_response(json.dumps(user_id))\n response.headers['Content-Type'] = 'application/json'\n return redirect(url_for('home'))\n\n@app.route(\"/info/personal\", methods=['GET'])\n@login_required\ndef get_personal_info():\n print(\"get_personal_info\")\n print(current_user.id)\n data = mongo.db.user.find_one_or_404({'id' : current_user.id})\n return json.dumps(data, default=str)\n\n@app.route(\"/info/personal\", methods=['POST'])\n@login_required\ndef update_personal_info():\n print(\"update personal\")\n info = json_util.loads(json.dumps(request.form))\n info[\"id\"] = current_user.id\n info[\"verified\"] = \"pending\"\n # aes = AES.new('This is a key123', AES.MODE_CBC, 'This is an IV456')\n # info[\"id_number\"] = aes.encrypt(info[\"id_number\"]+\"888888\")\n print(current_user)\n print(info)\n mongo.db.user.replace_one({'id' : info['id']}, info, upsert=True)\n return json.dumps({\"123\":\"QWQ\"})\n\n@app.route('/download/info', methods=['GET'])\ndef downloadFileInfo ():\n files = {}\n for dirpath, dirnames, filenames in os.walk(\"annotation_tools/files\"):\n for filename in [f for f in filenames if f.endswith(\"upload.pdf\")]:\n files[dirpath.split('/')[-1]] = filename\n print(os.path.join(dirpath.split('/')[-1], filename))\n\n return json.dumps(files)\n\n@app.route('/download/')\ndef downloadUserFile (user_dir):\n user_dir = unquote_plus(user_dir)\n print(user_dir)\n path = \"files/\" + user_dir + \"/upload.pdf\"\n return send_file(path, as_attachment=True)\n\n@app.route('/download')\ndef downloadFile ():\n #For windows you need to use drive name [ex: F:/Example.pdf]\n path = \"files/downloadable/download.pdf\"\n return send_file(path, as_attachment=True)\n\nALLOWED_EXTENSIONS = {'pdf'}\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n@app.route('/upload', methods=['POST'])\n@login_required\ndef uploadFile():\n if current_user.email in app.config[\"ADMIN_EMAIL\"]:\n is_admin = True\n else:\n is_admin = False\n\n if 'file' not in request.files:\n flash('No file part')\n return \"\"\n file = request.files['file']\n basedir = os.path.abspath(os.path.dirname(__file__))\n if is_admin:\n uploadPath = os.path.join(basedir, 'files/%s'%('downloadable'))\n file.filename = \"download.pdf\" \n else:\n uploadPath = os.path.join(basedir, 'files/%s(%s)'%(current_user.name, current_user.email))\n file.filename = \"upload.pdf\" \n pathlib.Path(uploadPath).mkdir(exist_ok=True)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(uploadPath, filename))\n return redirect(url_for('dashboard'))\n return \"\"\n\n@app.route('/edit_images')\n@login_required\ndef edit_images():\n\n new_directory_list = []\n for root, dirs, files in os.walk(\"./data/images\", topdown=False):\n for idx, name in enumerate(dirs):\n if not mongo.db.batch.find_one({'folder_name' : name}):\n print(\"new folder\")\n new_directory_list.append(name)\n \n for new_folder in new_directory_list:\n curBatchId = mongo.db.batch.count_documents({})\n curImageId = mongo.db.image.count_documents({})\n count = 0\n for root, dirs, files in os.walk(\"./data/images/\" + new_folder):\n for f in files:\n curImageId = curImageId + 1\n count = count + 1\n url = 'http://' + '127.0.0.1' + ':' + '6678' + '/' + new_folder + '/' + f\n image = Image.open(root + '/' + f)\n image_data = {\n \"id\": '{:08d}'.format(curImageId),\n \"file_name\": f,\n \"width\": image.size[0],\n \"height\": image.size[1],\n \"date_captured\": datetime.datetime.utcnow().isoformat(' '),\n \"license\": 1,\n \"coco_url\": url,\n \"flickr_url\": \"\",\n \"url\": url,\n \"rights_holder\": \"\"\n }\n mongo.db.image.replace_one({'id' : curImageId}, image_data, upsert=True)\n\n if count % 5 == 0:\n curBatchId = curBatchId + 1\n mongo.db.batch.replace_one({'id' : curBatchId}, \n {\n 'id': '{:08d}'.format(curBatchId),\n 'start_image_id': curImageId - 4,\n 'end_image_id': curImageId,\n 'folder_name': new_folder,\n 'annotated': False,\n 'annotater': '',\n 'progress': 0,\n 'checked': False,\n 'paid': False\n }, \n upsert=True)\n batch = mongo.db.batch.find_one({'annotater' : current_user.id, 'progress' : {\"$lt\": 5}})\n if batch == None:\n batch = mongo.db.batch.find_one_or_404({'annotated' : False})\n print(batch)\n current_user.editingBatchId = batch['id']\n batch['annotated'] = True\n batch['annotater'] = current_user.id\n mongo.db.batch.replace_one({'id' : batch['id']}, batch)\n images = list()\n annotations = list()\n for i in range(batch['start_image_id'], batch['end_image_id']+1):\n # print(i)\n image_id = '{:08d}'.format(i)\n image = mongo.db.image.find_one_or_404({'id' : image_id})\n annotation = list(mongo.db.annotation.find({'image_id' : image_id}))\n images.append(image)\n annotations.append(annotation)\n categories = list(mongo.db.category.find())\n\n images = json_util.dumps(images)\n annotations = json_util.dumps(annotations)\n categories = json_util.dumps(categories)\n\n if request.is_xhr:\n # Return just the data\n return jsonify({\n 'images' : json.loads(images),\n 'annotations' : json.loads(annotations),\n 'categories' : json.loads(categories)\n })\n else:\n # Render a webpage to edit the annotations for this image\n return render_template('edit_images.html', images=images, 
annotations=annotations, categories=categories)\n\n@app.route('/edit_image/')\n# @login_required\ndef edit_image(image_id):\n \"\"\" Edit a single image.\n \"\"\"\n\n image = mongo.db.image.find_one_or_404({'id' : image_id})\n annotations = list(mongo.db.annotation.find({'image_id' : image_id}))\n categories = list(mongo.db.category.find())\n\n image = json_util.dumps(image)\n annotations = json_util.dumps(annotations)\n categories = json_util.dumps(categories)\n\n if request.is_xhr:\n # Return just the data\n return jsonify({\n 'image' : json.loads(image),\n 'annotations' : json.loads(annotations),\n 'categories' : json.loads(categories)\n })\n else:\n # Render a webpage to edit the annotations for this image\n return render_template('edit_image.html', image=image, annotations=annotations, categories=categories)\n\n@app.route('/edit_task/')\ndef edit_task():\n \"\"\" Edit a group of images.\n \"\"\"\n\n if 'image_ids' in request.args:\n\n image_ids = request.args['image_ids'].split(',')\n\n else:\n\n start=0\n if 'start' in request.args:\n start = int(request.args['start'])\n end=None\n if 'end' in request.args:\n end = int(request.args['end'])\n\n # Find annotations and their accompanying images for this category\n if 'category_id' in request.args:\n category_id = request.args['category_id']\n annos = mongo.db.annotation.find({ \"category_id\" : category_id}, projection={'image_id' : True, '_id' : False})#.sort([('image_id', 1)])\n image_ids = list(set([anno['image_id'] for anno in annos]))\n image_ids.sort()\n\n # Else just grab all of the images.\n else:\n images = mongo.db.image.find(projection={'id' : True, '_id' : False}).sort([('id', 1)])\n image_ids = [image['id'] for image in images]\n\n if end is None:\n image_ids = image_ids[start:]\n else:\n image_ids = image_ids[start:end]\n\n if 'randomize' in request.args:\n if request.args['randomize'] >= 1:\n random.shuffle(image_ids)\n\n categories = list(mongo.db.category.find(projection={'_id' : False}))\n\n return render_template('edit_task.html',\n task_id=1,\n image_ids=image_ids,\n categories=categories,\n )\n@app.route('/batch/save', methods=['POST'])\ndef save_batch():\n info = json_util.loads(request.data)\n # print(info)\n print(current_user.editingBatchId)\n progress = info['imagesAnnotated'].count(True)\n mongo.db.batch.update_one({'id': current_user.editingBatchId}, {'$set': {'progress': progress}})\n \n return \"\"\n\n@app.route('/annotations/save', methods=['POST'])\ndef save_annotations():\n \"\"\" Save the annotations. 
This will overwrite annotations.\n \"\"\"\n annotations = json_util.loads(json.dumps(request.json['annotations']))\n\n for annotation in annotations:\n # Is this an existing annotation?\n if '_id' in annotation:\n if 'deleted' in annotation and annotation['deleted']:\n mongo.db.annotation.delete_one({'_id' : annotation['_id']})\n else:\n result = mongo.db.annotation.replace_one({'_id' : annotation['_id']}, annotation)\n else:\n if 'deleted' in annotation and annotation['deleted']:\n pass # this annotation was created and then deleted.\n else:\n # This is a new annotation\n # The client should have created an id for this new annotation\n # Upsert the new annotation so that we create it if its new, or replace it if (e.g) the\n # user hit the save button twice, so the _id field was never seen by the client.\n assert 'id' in annotation\n mongo.db.annotation.replace_one({'id' : annotation['id']}, annotation, upsert=True)\n\n # if 'id' not in annotation:\n # insert_res = mongo.db.annotation.insert_one(annotation, bypass_document_validation=True)\n # anno_id = insert_res.inserted_id\n # mongo.db.annotation.update_one({'_id' : anno_id}, {'$set' : {'id' : str(anno_id)}})\n # else:\n # insert_res = mongo.db.insert_one(annotation)\n\n return \"\"\n\n#################################################\n\n################## BBox Tasks ###################\n\n@app.route('/bbox_task/')\ndef bbox_task(task_id):\n \"\"\" Get the list of images for a bounding box task and return them along\n with the instructions for the task to the user.\n \"\"\"\n\n bbox_task = mongo.db.bbox_task.find_one_or_404({'id' : task_id})\n task_id = str(bbox_task['id'])\n tasks = []\n for image_id in bbox_task['image_ids']:\n image = mongo.db.image.find_one_or_404({'id' : image_id}, projection={'_id' : False})\n tasks.append({\n 'image' : image,\n 'annotations' : []\n })\n\n category_id = bbox_task['category_id']\n categories = [mongo.db.category.find_one_or_404({'id' : category_id}, projection={'_id' : False})]\n #categories = json.loads(json_util.dumps(categories))\n\n task_instructions_id = bbox_task['instructions_id']\n task_instructions = mongo.db.bbox_task_instructions.find_one_or_404({'id' : task_instructions_id}, projection={'_id' : False})\n\n return render_template('bbox_task.html',\n task_id=task_id,\n task_data=tasks,\n categories=categories,\n mturk=True,\n task_instructions=task_instructions\n )\n\n@app.route('/bbox_task/save', methods=['POST'])\ndef bbox_task_save():\n \"\"\" Save the results of a bounding box task.\n \"\"\"\n\n task_result = json_util.loads(json.dumps(request.json))\n\n task_result['date'] = str(datetime.datetime.now())\n\n insert_res = mongo.db.bbox_task_result.insert_one(task_result, bypass_document_validation=True)\n\n return \"\"\n\n#################################################\n","sub_path":"annotation_tools/annotation_tools.py","file_name":"annotation_tools.py","file_ext":"py","file_size_in_byte":19407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"107886192","text":"'''\nGiven a collection of candidate numbers (candidates) and a target number (target), find all unique combinations in candidates where the candidate numbers sum to target.\n\nEach number in candidates may only be used once in the combination.\n\nNote: The solution set must not contain duplicate combinations.\n'''\nfrom time import time\nfrom typing import List\n\n\nclass Solution:\n def combinationSum2(\n self, candidates: List[int], target: int\n ) -> 
List[List[int]]:\n candidates.sort()\n res = []\n\n def dfs(i, subset, total):\n if total == target:\n res.append(subset[:])\n return\n\n prev = None\n for j in range(i, len(candidates)):\n if candidates[j] == prev:\n # Skip if same as previous value or if over target\n continue\n if total + candidates[j] > target:\n break\n # Next index cannot be the same so use j + 1\n dfs(j + 1, subset + [candidates[j]], total + candidates[j])\n prev = candidates[j]\n\n return\n\n dfs(0, [], 0)\n return res\n\n def reference(self, candidates: List[int], target: int) -> List[List[int]]:\n candidates.sort()\n\n res = []\n\n def backtrack(curr, pos, target):\n if target == 0:\n res.append(curr.copy())\n if target <= 0:\n return\n\n prev = -1\n for i in range(pos, len(candidates)):\n if candidates[i] == prev:\n continue\n curr.append(candidates[i])\n backtrack(curr, i + 1, target - candidates[i])\n curr.pop()\n prev = candidates[i]\n\n backtrack([], 0, target)\n return res\n\n def quantify(self, test_cases, runs=50000):\n sol_start = time()\n for i in range(runs):\n for case in test_cases:\n if i == 0:\n print(self.combinationSum2(*case))\n else:\n self.combinationSum2(*case)\n print(f'Runtime for our solution: {time() - sol_start}\\n')\n\n ref_start = time()\n for i in range(0, runs):\n for case in test_cases:\n if i == 0:\n print(self.reference(*case))\n else:\n self.reference(*case)\n print(f'Runtime for reference: {time() - ref_start}')\n\n\nif __name__ == '__main__':\n test = Solution()\n test_cases = [([10, 1, 2, 7, 6, 1, 5], 8), ([2, 5, 2, 1, 2], 5)]\n test.quantify(test_cases)\n","sub_path":"NeetCode +75/10 - Backtracking/40-combination-sum-II.py","file_name":"40-combination-sum-II.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"189349031","text":"\"\"\"\n70. Climbing Stairs\nYou are climbing a stair case. It takes n steps to reach to the top.\n\nEach time you can either climb 1 or 2 steps. 
In how many distinct ways can you climb to the top?\n\nNote: Given n will be a positive integer.\n\"\"\"\n\n\nclass Solution(object):\n\n # Brute force, time limit exceed\n def climbStairs_1(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n target = n\n\n def __climb(current):\n if current > target: # the last `current + 2`\n return 0\n elif current == target: # the last `current + 1`\n return 1\n else: # current <= target\n return __climb(current + 1) + __climb(current + 2)\n\n return __climb(0)\n\n # ref: Approach #2 Recursion with memorization [Accepted] https://leetcode.com/articles/climbing-stairs/\n # AC, 39ms, 24.76%\n def climbStairs_2(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n memo = [None] * n\n\n def __climb(current):\n nonlocal memo, n\n if current > n:\n return 0\n elif current == n:\n return 1\n\n # current < n\n if memo[current] is not None:\n return memo[current]\n\n memo[current] = __climb(current + 1) + __climb(current + 2)\n return memo[current]\n\n return __climb(0)\n\n # ref: Approach #3 Dynamic Programming [Accepted]\n # AC, 32ms, 61.72%\n def climbStairs_3(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n if n <= 2:\n return n\n\n dp = [None] * n\n dp[0] = 1\n dp[1] = 2\n for i in range(2, n):\n dp[i] = dp[i-1] + dp[i-2]\n return dp[n-1]\n\n # ref: Approach #4 Fibonacci Number [Accepted]\n # use Fibonacci number, AC\n def climbStairs(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n if n <= 2:\n return n\n\n f1 = 1\n f2 = 2\n for i in range(3, n+1):\n f1, f2 = f2, f1+f2 # 55ms\n # f3 = f1 + f2 # 68ms\n # f1 = f2\n # f2 = f3\n return f2\n\n\n\n\n\n\n\ndef main():\n tc_list = [\n [1, 1],\n [2, 2],\n [3, 3],\n [4, 5]\n ]\n\n solution = Solution()\n for tc in tc_list:\n input_ = tc[0]\n expected = tc[1]\n output = solution.climbStairs(input_)\n\n assert output == expected\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"archived/070_Climbing_Stairs/alg_070_solution.py","file_name":"alg_070_solution.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"173694348","text":"# -*- coding: utf-8 -*-\n##\n## This file is part of Invenio.\n## Copyright (C) 2006, 2007, 2008, 2010, 2011 CERN.\n##\n## Invenio is free software; you can redistribute it and/or\n## modify it under the terms of the GNU General Public License as\n## published by the Free Software Foundation; either version 2 of the\n## License, or (at your option) any later version.\n##\n## Invenio is distributed in the hope that it will be useful, but\n## WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n## General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License\n## along with Invenio; if not, write to the Free Software Foundation, Inc.,\n## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.\nfrom invenio import web_api_key\n\n\"\"\"Unit tests for REST like authentication API.\"\"\"\n\ntry:\n import hashlib\nexcept:\n pass\nimport unittest\nimport re\nimport hmac\nimport urllib\nimport time\nimport string\n\nfrom invenio.testutils import make_test_suite, run_test_suite\nfrom invenio.dbquery import run_sql\n\nweb_api_key.CFG_WEB_API_KEY_ALLOWED_URL = [('/search\\?*', 0, True),\n ('/bad\\?*', -1, True)] #Just for testing\n\nweb_api_key._CFG_WEB_API_KEY_ALLOWED_URL = [(re.compile(_url), _authorized_time, _need_timestamp)\n for _url, _authorized_time, _need_timestamp in web_api_key.CFG_WEB_API_KEY_ALLOWED_URL]\n\ndef build_web_request(path, params, api_key=None, secret_key=None):\n items = (hasattr(params, 'items') and [params.items()] or [list(params)])[0]\n if api_key:\n items.append(('apikey', api_key))\n if secret_key:\n items.append(('timestamp', str(int(time.time()))))\n items = sorted(items, key=lambda x: x[0].lower())\n url = '%s?%s' % (path, urllib.urlencode(items))\n signature = hmac.new(secret_key, url, hashlib.sha1).hexdigest()\n items.append(('signature', signature))\n if not items:\n return path\n return '%s?%s' % (path, urllib.urlencode(items))\n\nclass APIKeyTest(unittest.TestCase):\n \"\"\" Test functions related to the REST authentication API \"\"\"\n def setUp(self):\n self.id_admin = run_sql('SELECT id FROM user WHERE nickname=\"admin\"')[0][0]\n\n def test_create_remove_show_key(self):\n \"\"\"apikey - create/list/delete REST key\"\"\"\n self.assertEqual(0, len(web_api_key.show_web_api_keys(uid=self.id_admin)))\n web_api_key.create_new_web_api_key(self.id_admin, \"Test key I\")\n web_api_key.create_new_web_api_key(self.id_admin, \"Test key II\")\n web_api_key.create_new_web_api_key(self.id_admin, \"Test key III\")\n web_api_key.create_new_web_api_key(self.id_admin, \"Test key IV\")\n web_api_key.create_new_web_api_key(self.id_admin, \"Test key V\")\n self.assertEqual(5, len(web_api_key.show_web_api_keys(uid=self.id_admin)))\n self.assertEqual(5, len(web_api_key.show_web_api_keys(uid=self.id_admin, diff_status='')))\n keys_info = web_api_key.show_web_api_keys(uid=self.id_admin)\n web_api_key.mark_web_api_key_as_removed(keys_info[0][0])\n self.assertEqual(4, len(web_api_key.show_web_api_keys(uid=self.id_admin)))\n self.assertEqual(5, len(web_api_key.show_web_api_keys(uid=self.id_admin,diff_status='')))\n\n run_sql(\"UPDATE webapikey SET status='WARNING' WHERE id=%s\", (keys_info[1][0],))\n run_sql(\"UPDATE webapikey SET status='REVOKED' WHERE id=%s\", (keys_info[2][0],))\n\n self.assertEqual(4, len(web_api_key.show_web_api_keys(uid=self.id_admin)))\n self.assertEqual(5, len(web_api_key.show_web_api_keys(uid=self.id_admin, diff_status='')))\n\n run_sql(\"DELETE FROM webapikey\")\n\n def test_acc_get_uid_from_request(self):\n \"\"\"webapikey - Login user from request using REST key\"\"\"\n path = '/search'\n params = 'ln=es&sc=1&c=Articles & Preprints&action_search=Buscar&p=ellis'\n\n self.assertEqual(0, len(web_api_key.show_web_api_keys(uid=self.id_admin)))\n web_api_key.create_new_web_api_key(self.id_admin, \"Test key I\")\n\n key_info = run_sql(\"SELECT id FROM webapikey WHERE id_user=%s\", (self.id_admin,))\n url = web_api_key.build_web_request(path, params, api_key=key_info[0][0])\n url = 
string.split(url, '?')\n uid = web_api_key.acc_get_uid_from_request(url[0], url[1])\n self.assertEqual(uid, self.id_admin)\n\n url = web_api_key.build_web_request(path, params, api_key=key_info[0][0])\n url += \"123\" # corrupt the key\n url = string.split(url, '?')\n uid = web_api_key.acc_get_uid_from_request(url[0], url[1])\n self.assertEqual(uid, -1)\n\n path = '/bad'\n uid = web_api_key.acc_get_uid_from_request(path, \"\")\n self.assertEqual(uid, -1)\n params = { 'nocache': 'yes', 'limit': 123 }\n url = web_api_key.build_web_request(path, params, api_key=key_info[0][0])\n url = string.split(url, '?')\n uid = web_api_key.acc_get_uid_from_request(url[0], url[1])\n self.assertEqual(uid, -1)\n\n run_sql(\"DELETE FROM webapikey\")\n\nTEST_SUITE = make_test_suite(APIKeyTest)\n\nif __name__ == \"__main__\":\n run_test_suite(TEST_SUITE)\n run_sql(\"DELETE FROM webapikey\")","sub_path":"lib/python/invenio/web_api_key_unit_tests.py","file_name":"web_api_key_unit_tests.py","file_ext":"py","file_size_in_byte":5248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"21813655","text":"'''\nCreated on Dec 2, 2017\n\n@author: flyn\n'''\nimport os\n_dir = os.path.dirname(__file__)\n\nmodel = os.path.join(_dir, \"facades_sandbox/export\")\noutput_file = os.path.join(_dir, \"facades_sandbox/output_dir/output_inference.png\")\ntmp_file = os.path.join(_dir, \"facades_sandbox/input_dir/image.png\")\nmaps_model = os.path.join(_dir, \"maps_sandbox/export\")\noutput_maps_file = os.path.join(_dir, \"maps_sandbox/output_dir/output_inference.png\")\nmaps_init_ouput = output_resize_maps_file = os.path.join(_dir, \"maps_sandbox/output_dir/resize_output_inference.png\")\nmaps_tmp_file = os.path.join(_dir, \"maps_sandbox/input_dir/image.jpg\")\nmaps_init_file = os.path.join(_dir, \"maps_sandbox/init_image/start_map.jpg\")\n\n\n \nbuttons_dict = {\"Wall\": (13,61,251),\n \"Door\": (165,0,0),\n \"Window\": (0,117,255),\n \"W.Sill\": (104,248,152),\n \"W.Head\": (29,255,221),\n \"Shutter\": (238,237,40),\n \"Balcony\": (184,255,56),\n \"Trim\": (255,146,4),\n \"Cornice\": (255,68,1),\n \"Column\": (246,0,1),\n \"Entrance\": (0,201,255)}\n\n\nmaps_buttons_dict = {\"Street\": (255, 255, 255),\n \"Block\": (230, 230, 225),\n \"Grass\": (205, 220, 175),\n \"Buildings\": (245, 240, 235)}\n\n","sub_path":"ui/utility/inference_config.py","file_name":"inference_config.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"545720943","text":"# Gyan Tatiya\n\nfrom pdb import set_trace\nimport pickle\n\nimport scipy.sparse as sp\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef normalize_adj(adj):\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n\n\nclass GCN(nn.Module):\n def __init__(self):\n super(GCN, self).__init__()\n\n # get and normalize adjacency matrix.\n adjmat_path = r\"data/glove_data/adjmat.bin\"\n bin_file = open(adjmat_path, \"rb\")\n A_raw = pickle.load(bin_file)\n bin_file.close()\n A = normalize_adj(A_raw).tocsr().toarray()\n self.A = torch.nn.Parameter(torch.Tensor(A))\n\n embeddings_path = r\"data/glove_data/glove_embeddings_300d.bin\"\n bin_file = open(embeddings_path, \"rb\")\n objects_vector = 
pickle.load(bin_file)\n regions_vector = pickle.load(bin_file)\n bin_file.close()\n\n objects = list(sorted(objects_vector.keys()))\n regions = list(sorted(regions_vector.keys()))\n\n self.n = len(objects) + len(regions)\n\n all_glove = torch.zeros(self.n, 300)\n i = 0\n for obj in objects:\n all_glove[i, :] = torch.Tensor(objects_vector[obj])\n i += 1\n for reg in regions:\n all_glove[i, :] = torch.Tensor(regions_vector[reg])\n i += 1\n\n self.all_glove = nn.Parameter(all_glove)\n self.all_glove.requires_grad = False\n\n self.get_word_embed = nn.Linear(300, self.n)\n\n self.W0 = nn.Linear(self.n * 2, 1024, bias=False)\n self.W1 = nn.Linear(1024, 1024, bias=False)\n self.W2 = nn.Linear(1024, 1, bias=False)\n\n self.feature_dims = 256 - 2\n self.final_mapping = nn.Linear(self.n, self.feature_dims)\n\n def forward(self, class_embed):\n\n class_embed = class_embed.reshape(1, -1)\n word_embed = self.get_word_embed(self.all_glove.detach())\n x = torch.cat((class_embed.repeat(self.n, 1), word_embed), dim=1)\n x = torch.mm(self.A, x)\n x = F.relu(self.W0(x))\n x = torch.mm(self.A, x)\n x = F.relu(self.W1(x))\n x = torch.mm(self.A, x)\n x = F.relu(self.W2(x))\n x = x.view(1, self.n)\n x = self.final_mapping(x)\n\n return x\n\nimport dgl\nfrom dgl.nn import GraphConv\nimport scipy.sparse as spp\n\n\nclass DGL_GCN(nn.Module):\n def __init__(self, in_feats=(256-2)*2, o_feats=1, debug=False):\n super(DGL_GCN, self).__init__()\n assert in_feats % 2 == 0, \"in_feats should be even.\"\n self.debug = debug\n adjmat_path = r\"data/glove_data/adjmat.bin\"\n with open(adjmat_path, 'rb') as f:\n adj_mat = pickle.load(f)\n adj_mat = spp.coo_matrix(adj_mat + np.eye(adj_mat.shape[0]))\n self.g = dgl.DGLGraph(adj_mat)\n\n embeddings_path = r\"data/glove_data/glove_embeddings_300d.bin\"\n with open(embeddings_path, 'rb') as bin_file:\n objects_vector = pickle.load(bin_file)\n regions_vector = pickle.load(bin_file)\n\n objects = list(sorted(objects_vector.keys())) # 21 objects: ['bathtub', ..., 'tv_monitor']\n regions = list(sorted(regions_vector.keys())) # 24 regions: ['balcony', ..., 'workout/gym/exercise']\n self.n = len(objects) + len(regions)\n if self.debug:\n print(\"objects: \", len(objects)) # , objects)\n print(\"regions: \", len(regions)) # , regions)\n\n all_glove = torch.zeros(self.n, 300)\n i = 0\n for obj in objects:\n all_glove[i, :] = torch.Tensor(objects_vector[obj])\n i += 1\n for reg in regions:\n all_glove[i, :] = torch.Tensor(regions_vector[reg])\n i += 1\n if self.debug:\n print(\"all_glove: \", all_glove.shape)\n print(\"all_glove \", all_glove)\n\n self.all_glove = nn.Parameter(all_glove)\n self.all_glove.requires_grad = False\n self.feature_dims = int(in_feats / 2)\n self.get_word_embed = nn.Linear(300, self.feature_dims)\n self.get_img_embed = nn.Linear(self.n, self.feature_dims)\n\n self.conv1 = GraphConv(in_feats, in_feats)\n self.conv2 = GraphConv(in_feats, in_feats)\n self.conv3 = GraphConv(in_feats, o_feats)\n\n self.final_mapping = nn.Linear(self.n, self.feature_dims)\n\n def forward(self, class_embed):\n\n class_embed = class_embed.reshape(1, -1)\n class_embed = self.get_img_embed(class_embed)\n if self.debug:\n print(\"class_embed: \", class_embed.shape)\n word_embed = self.get_word_embed(self.all_glove)\n if self.debug:\n print(\"word_embed: \", word_embed.shape)\n x = torch.cat((class_embed.repeat(self.n, 1), word_embed), dim=1)\n if self.debug:\n print(\"torch.cat((class_embed.repeat(self.n, 1), word_embed), dim=1): \", x.shape)\n\n if self.debug:\n 
print(\"\\n**************devices**************\")\n print(f\"g.device: {self.g.device}\")\n print(f\"conv1.device: {self.conv1.weight.device}\")\n print(f\"conv2.device: {self.conv2.weight.device}\")\n print(f\"conv3.device: {self.conv3.weight.device}\")\n print(f\"final_mapping.weight.device: {self.final_mapping.weight.device}\")\n print(\"**************devices**************\")\n\n self.g = self.g.to(class_embed.device)\n h = self.conv1(self.g, x)\n h = F.relu(h)\n h = self.conv2(self.g, h)\n h = F.relu(h)\n h = self.conv3(self.g, h)\n o = F.relu(h)\n if self.debug:\n print(f\"o: {o.shape}\")\n o = o.view(1, -1)\n o = self.final_mapping(o)\n\n return o\n\n# if __name__ == '__main__':\n# model = DGL_GCN(in_feats=(256-2)*2, debug=True)\n# model = model.to('cuda')\n# joint_rep = torch.rand(45).to('cuda')\n# output = model(joint_rep)\n# print(f'The shape of output of GCN: {output.shape}')\n","sub_path":"ss_baselines/saven/models/gcn.py","file_name":"gcn.py","file_ext":"py","file_size_in_byte":6099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"95208125","text":"\"\"\"add users table\n\nRevision ID: 267e9efef687\nRevises: \nCreate Date: 2018-02-18 17:37:22.715432\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy import func\n\nfrom sqlalchemy.dialects.postgresql import UUID, JSON\n# revision identifiers, used by Alembic.\nrevision = '267e9efef687'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n\n # We want to avoid an error trying to use the uuid_generate_v4(), so we have to install this extension\n # Example Error: sqlalchemy.exc.ProgrammingError: (psycopg2.ProgrammingError) function uuid_generate_v4() does not exist\n op.execute('CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\";')\n\n op.create_table(\n 'users',\n sa.Column('created_at', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('updated_at', sa.DateTime(timezone=True), server_default=func.now(), nullable=False),\n sa.Column('id', UUID(), nullable=False, server_default=func.uuid_generate_v4()),\n sa.Column('email', sa.String(), nullable=False),\n sa.Column('password', sa.String(), nullable=True),\n sa.Column('meta', JSON(), nullable=True),\n sa.Column('active', sa.Boolean(), nullable=False),\n sa.Column('is_system_user', sa.Boolean(), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n )\n op.create_index(op.f('ix_users_created_at'), 'users', ['created_at'], unique=False)\n op.create_index(op.f('ix_users_updated_at'), 'users', ['updated_at'], unique=False)\n op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)\n op.create_index(op.f('ix_users_active'), 'users', ['active'], unique=False)\n\n\ndef downgrade():\n # op.drop_index(op.f('ix_users_created_at'), table_name='users')\n # op.drop_index(op.f('ix_users_updated_at'), table_name='users')\n # op.drop_index(op.f('ix_users_email'), table_name='users')\n # op.drop_index(op.f('ix_users_active'), table_name='users')\n op.drop_table('users')\n","sub_path":"migrations/versions/20180218_267e9efef687_add_users_table.py","file_name":"20180218_267e9efef687_add_users_table.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"455312683","text":"# -*- coding:utf-8 -*-\nimport sys\nfrom urllib import parse, request, error\nimport base64\nimport re\nimport datetime\nimport os\nimport json\nfrom M2Crypto import BIO, RSA # dnf install 
python3-m2crypto.x86_64 -y\nimport hashlib\nimport traceback\n\n\ndef getjsonfiledata(encrypt: \"检查是否已经获取密钥对\" = True):\n \"\"\"读入配置文件 testconfig.json ,请先配置它,并先执行 test_gettotptoken.py 。\"\"\"\n\n # tlog(\"读入配置文件 ...\")\n f = open(\"testconfig.json\", 'r')\n lines = f.read()\n f.close()\n jsonfiledata = json.loads(lines)\n if jsonfiledata[\"apiver\"] == \"\" or jsonfiledata[\"url\"] == \"\":\n terr(\"错误: 'testconfig.json' 配置不完全。\")\n exit()\n if encrypt and (jsonfiledata[\"publickey\"] == \"\" or jsonfiledata[\"privatekey\"] == \"\"):\n terr(\"错误: 需要一个初始的密钥对。\")\n exit()\n return jsonfiledata\n\n\ndef rsaEncrypt(public_key: \"公钥\", message: \"要加密的信息\", showAllInfo=True):\n \"\"\"RSA 加密\"\"\"\n bio = BIO.MemoryBuffer(public_key)\n rsa_pub = RSA.load_pub_key_bio(bio)\n buffer = None\n while message:\n input = message[:245]\n if showAllInfo:\n tlog(\"正在加密分段 ...\")\n tlog(input)\n snidata = rsa_pub.public_encrypt(input, RSA.pkcs1_padding)\n if buffer == None:\n buffer = snidata\n else:\n buffer = buffer+snidata\n message = message[245:]\n ctxt64_pri = base64.b64encode(buffer)\n return ctxt64_pri\n\n\ndef rsaDecrypt(private_key: \"私钥\", message: \"要解密的信息\", showAllInfo=True):\n \"\"\"RSA 解密\"\"\"\n if (isinstance(private_key, bytes) == False):\n private_key = bytes(private_key, encoding=\"utf8\")\n bio = BIO.MemoryBuffer(private_key)\n rsa_pri = RSA.load_key_bio(bio)\n buffer = None\n while message:\n input = message[:512]\n if showAllInfo:\n tlog(\"正在解密分段 ...\")\n snidata = rsa_pri.private_decrypt(input, RSA.pkcs1_padding)\n if showAllInfo:\n tlog(snidata)\n if buffer == None:\n buffer = snidata\n else:\n buffer = buffer+snidata\n message = message[512:]\n return buffer\n\n\ndef postarray_p(postUrl: \"提交到指定的URL\", jsonDataArr: \"提交的数据数组\", showAllInfo=True):\n \"\"\"[明文传输]向服务器提交内容并显示返回内容,明文操作\"\"\"\n\n jsonfiledata = getjsonfiledata(False)\n apiverAppidSecret = [jsonfiledata[\"apiver\"], jsonfiledata[\"apptoken\"]]\n\n if (showAllInfo):\n tlog(\"传输模式:明文\")\n tlog(\"准备输入的数据 ...\")\n tlog(postUrl)\n tlog(jsonDataArr)\n if (showAllInfo):\n tlog(\"读取 testconfig.json ...\")\n totptoken = jsonfiledata[\"apptoken\"]\n if (showAllInfo):\n tlog(\"插入固定提交信息 ...\")\n jsonDataArr[\"apptoken\"] = totptoken\n jsonDataArr[\"apiver\"] = apiverAppidSecret[0]\n postMod = parse.urlencode(jsonDataArr).encode(encoding='utf-8')\n if (showAllInfo):\n tlog(json.dumps(jsonDataArr))\n tlog(\"↑ 发送请求:\")\n tlog(postMod.decode())\n postReq = request.Request(url=postUrl, data=postMod)\n try:\n postRes = request.urlopen(postReq)\n except error.HTTPError as e:\n terr(\"错误:HTTP 连接遇到问题!\")\n tlog(e)\n tlog(\"使用 cURL 获取原始数据 ...\")\n curlcmd = 'curl -X POST -d \"'+postMod.decode()+'\" \"'+postUrl+'\"'\n tlog(curlcmd)\n output = os.popen(curlcmd)\n tlog(output.read())\n sys.exit(1)\n except error.URLError as e:\n terr(\"错误:网址不正确!\")\n tlog(e)\n sys.exit(1)\n postRes = postRes.read()\n postRes = postRes.decode(encoding='utf-8')\n if (showAllInfo):\n tlog(\"↓ 收到数据:\")\n tlog(postRes)\n tlog(\"JSON 解析 ...\")\n try:\n dataarr = json.loads(postRes)\n except:\n terr(\"错误:解密失败。\")\n tlog(\"原始内容:\")\n tlog(postRes)\n sys.exit()\n tlog(dataarr)\n tok(\"完成。\")\n return dataarr\n\n# appKeyMode: 0.使用'd' 1.apptoken作为key 2.apptoken加入json\n\n\ndef postarray(postUrl: \"提交到指定的URL\", jsonDataArr: \"提交的数据数组\", showAllInfo=True, publicKey: \"服务器公钥\" = None, privateKey: \"客户端私钥\" = None, appKeyMode=1):\n \"\"\"[加密传输]向服务器提交内容并显示返回内容,自动处理加密解密\"\"\"\n jsonfiledata = getjsonfiledata(True)\n if (showAllInfo):\n tlog(\"传输模式:加密\")\n tlog(postUrl)\n if 
(showAllInfo):\n tlog(\"读取 testconfig.json ...\")\n if publicKey == None:\n publicKey = jsonfiledata[\"publickey\"]\n if privateKey == None:\n privateKey = jsonfiledata[\"privatekey\"]\n if (showAllInfo):\n tlog(\"插入固定提交信息 ...\")\n if appKeyMode == 2:\n jsonDataArr[\"apptoken\"] = jsonfiledata[\"apptoken\"]\n jsonDataArr[\"apiver\"] = jsonfiledata[\"apiver\"]\n if (showAllInfo):\n tlog(\"准备输入的数据 ...\")\n jsondata = json.dumps(jsonDataArr)\n jsondata = str.encode(jsondata)\n if (showAllInfo):\n tlog(jsondata)\n if (showAllInfo):\n publicKeyStr = \"\"\n if (isinstance(publicKey, str) == False):\n publicKeyStr = str(publicKey, encoding=\"utf-8\")\n else:\n publicKeyStr = publicKey\n tlog(\"正在使用公钥 \"+md5(clearkey(publicKeyStr))+\" 加密数据 ...\")\n publicKey = str.encode(publicKey)\n if appKeyMode == 0:\n postKey = 'd'\n elif appKeyMode == 1:\n postKey = jsonfiledata[\"apptoken\"]\n postData = {\n postKey: rsaEncrypt(publicKey, jsondata, showAllInfo)\n }\n postMod = parse.urlencode(postData).encode(encoding='utf-8')\n if (showAllInfo):\n tlog(\"↑ 发送请求:\")\n if (showAllInfo):\n tlog(jsonDataArr)\n postReq = request.Request(url=postUrl, data=postMod)\n postRes = request.urlopen(postReq)\n postRes = postRes.read()\n if (showAllInfo):\n tlog(\"↓ 收到数据:\")\n postRes0 = postRes.decode(encoding='utf-8')\n postRes = postRes0\n if (showAllInfo):\n tlog(postRes)\n if postRes[0:1] == b'[' or postRes[0:1] == b'{':\n terr(\"收到了非预期的明文数据\")\n quit()\n if postRes[0:3] == '= 2000000:\n terr(\"返回状态码错误。\")\n terr(resArr['msg'])\n quit()\n tok(\"网络操作完成。\")\n tok(json.dumps(resArr, indent=2))\n if 'msg' in resArr:\n tok(resArr['msg'])\n else:\n tlog(\"服务器没有返回可读性状态信息。\")\n return resArr\n\n\ndef clearkey(keystr: \"密钥内容\"):\n \"\"\"只保留 key 的 base64 部分,删除首尾和回车\"\"\"\n keylines = keystr.split('\\n')\n if keylines[-1][0:5] == '-----':\n keylines.pop()\n if keylines[0][0:5] == '-----':\n del(keylines[0])\n return ''.join(keylines)\n\n\ndef md5(bstr: \"输入byte字符串\"):\n \"\"\"MD5 加密\"\"\"\n md5 = hashlib.md5()\n md5.update(bstr.encode('utf-8'))\n return md5.hexdigest()\n\n\ndef tlog(loginfo: \"信息内容\", end='\\n'):\n \"\"\"输出前面带时间的信息\"\"\"\n nowtime = datetime.datetime.now().strftime('[%Y-%m-%d %H:%M:%S.%f]')\n print(\"\\033[35m\", end='')\n print(nowtime, end='\\033[0m ')\n print(loginfo, end=end)\n\n\ndef terr(loginfo: \"信息内容\"):\n \"\"\"输出错误\"\"\"\n tlog(\"\\033[31m\"+loginfo+\"\\033[0m\")\n errinfo = traceback.format_tb(sys.exc_info()[2])\n if errinfo:\n for err in errinfo:\n tlog(\"\\033[31m\"+err+\"\\033[0m\")\n\n\ndef tok(loginfo: \"信息内容\"):\n \"\"\"输出正确\"\"\"\n tlog(\"\\033[32m\"+loginfo+\"\\033[0m\")\n\n\ndef title(loginfo: \"信息内容\"):\n \"\"\"输出标题\"\"\"\n tlog(\"\\033[1m\"+loginfo.center(40, '=')+\"\\033[0m\")\n\n\ndef instr(alertinfo: \"提示用户要输入的内容\", isint=False):\n \"\"\"接收用户输入\"\"\"\n tlog(\"\\033[1m\"+alertinfo+\"\\033[4m\", '')\n userinput = input()\n print(\"\\033[0m\", end='')\n if isint:\n return int(userinput)\n return userinput\n","sub_path":"tests/test_core.py","file_name":"test_core.py","file_ext":"py","file_size_in_byte":9971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"22593360","text":"#!/usr/bin/env python\n# Function which takes as inputs a hidden satate x_i and a pixel intensity y_i and then gives the \n#probability that the observed intensity of pixel i is y_i and does not consider information from\n#neigbouring particles\n\nfrom scipy import stats\nimport numpy as np\nimport matplotlib.pyplot as plt\n#import cv as cv2\nfrom PIL import 
Image\n\n#importing image and turning into greyscale (each pixel an array of values from 0-255)\nimage = Image.open(\"noisy_logo.png\")\ngray = np.asarray(image.convert('L'))\n\n\n#defining the outputs of function\ndef likelihood(y_i,x_i):\n\tif x_i == 1 and y_i <127:\n\t\t\tprobability = 0.15\n\tif x_i == 1 and y_i >=127:\n\t\t\tprobability = 0.85\n\tif x_i == 0 and y_i <127:\n\t\t\tprobability = 0.85\n\tif x_i == 0 and y_i >=127:\n\t\t\tprobability = 0.15\n\treturn probability\n\n\n#Testing function by making plot#\nx_i = 1\t\t\t#value of hidden state x_i\nprobability = []\t\t#array of probabilities\nfor y_i in range(0,255):\n\tprobability.append(likelihood(y_i,x_i))\nplt.plot(range(0,255),probability)\nplt.show()\n\n","sub_path":"ComputePixelLikelihood.py","file_name":"ComputePixelLikelihood.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"485956105","text":"#!/usr/bin/env python\nimport math\nimport sys\nimport random\nimport pointcurve\n\nif len(sys.argv) <= 3:\n    sys.stderr.write(\"%s: USAGE %s duration ip_file src_ip\\n\" % (sys.argv[0], sys.argv[0]))\n    sys.exit()\n\nl = 7.1\nqueryarr = lambda x: 1 - math.exp(-l*x)\n#queryintvl_r = lambda y: math.log(1-y)/(-l)\n\nqueryintvl_r = pointcurve.pointcurve('queryintvl.cdf').ytox\n\nneed = float(sys.argv[1])\nhave = 0\n\nf = open(sys.argv[2])\nip = f.readlines()\nip = [ t.replace('\\n','') for t in ip ]\nf.close()\n\nwhile True:\n    intvl = 
random.random()\n    intvl = queryintvl_r(intvl)\n    have += intvl\n    if have > need:\n        sys.stderr.write(\"\\nlast intvl is %f, if included we have %f sec\\n\" % (intvl, have))\n        break\n\n    size = 2048\n\n    src = sys.argv[3]\n    #for dst in ip:\n        #if dst == src: continue\n        #sys.stdout.write(\"%.9f %s %d\\n\" % (intvl, dst, size))\n        #intvl = 0\n    sys.stdout.write(\"%.9f %s %d\\n\" % (intvl, src, size))\n","sub_path":"queryintvl.py","file_name":"queryintvl.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"588135168","text":"\ndef func_08_001():\n    days = 1\n    summ=0\n    while summ<1000:\n        summ += 2 ** days\n        days += 1\n\n    print(\"days=\"+str(days))\n\n\ndef func_08_002():\n    print(\"enter a prime number:\")\n    x=int(input())\n    days=1\n    summ=0\n    while summ < 1000:\n        summ += x ** days\n        days += 1\n    print(\"days = \",days)\n\n\ndef func_08_003():\n    l = 10\n    summ = 10\n    days=30\n    for i in range(1,days,2):  # the running quota must be raised every other workout\n        l += l*0.15\n        summ += l\n    print(\"Total distance over 30 days = \",summ,'km')\n\n\n\n\ndef func_08_004_a():\n    l = 10\n    days = 1\n    while l<20:\n        days+=1\n        l+=l*0.1\n    print(\"After \",days,\" days the athlete will run more than 20 km\")\n\n\ndef func_08_004_b():\n    l = 10\n    days = 1\n    sum=10\n    while sum<100:\n        days+=1\n        l+=l*0.1\n        sum+=l\n    print(\"After \",days,\" days the athlete's total distance reaches 100 km\")","sub_path":"project_x/08_while.py","file_name":"08_while.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"140607189","text":"from datetime import datetime\n\nfrom freezegun import freeze_time\nimport pytest\n\nfrom reviews.datasource.client import SQLClient\nfrom reviews.datasource import PullRequestManager\nfrom reviews.datasource import PullRequest, Label\n\n\n@pytest.fixture\ndef manager(setup_database):\n    client = SQLClient(connection=setup_database)\n    manager = PullRequestManager(client=client)\n    manager.create_table()\n    yield manager\n\n\n@pytest.fixture\n@freeze_time(\"2020-01-01T00:00:00+00:00\")\ndef pull_request():\n    return PullRequest(\n        number=1,\n        title=\"[1] Initial Commit\",\n        created_at=datetime.now(),\n        updated_at=datetime.now(),\n        approved=None,\n        approved_by_others=False,\n        labels=[Label(name=\"Python\")],\n    )\n\n\ndef test_table_created(manager):\n    assert manager.exists() is True\n\n\ndef test_table_dropped(manager):\n    manager.drop_table()\n\n    assert manager.exists() is False\n\n\ndef test_insert(manager, pull_request):\n    manager.insert(model=pull_request)\n\n    assert len(manager.all()) == 1\n\n\ndef test_select_by_id(manager, pull_request):\n    manager.insert(model=pull_request)\n\n    results = manager.get_by_id(row_id=1)\n\n    assert len(results) == 1\n\n\ndef test_bulk_insert(manager):\n    manager.insert_all(\n        models=[\n            PullRequest(\n                number=1,\n                title=\"[1] Initial Commit\",\n                created_at=datetime.now(),\n                updated_at=datetime.now(),\n                approved=None,\n                approved_by_others=False,\n                labels=[Label(name=\"Python\")],\n            ),\n            PullRequest(\n                number=2,\n                title=\"[2] Adds README\",\n                created_at=datetime.now(),\n                updated_at=datetime.now(),\n                approved=None,\n                approved_by_others=False,\n                labels=[Label(name=\"Python\")],\n            ),\n        ]\n    )\n\n    assert len(manager.all()) == 2\n\n\ndef test_update(manager, pull_request):\n    last_row_id = manager.insert(model=pull_request)\n\n    pull_request.title = \"[1] Initial setup of repository\"\n    pull_request.approved_by_others = True\n    
manager.update(row_id=last_row_id, model=pull_request)\n\n assert len(manager.all()) == 1\n assert manager.get_by_id(row_id=last_row_id) == [\n (\n 1,\n 1,\n \"[1] Initial setup of repository\",\n \"2020-01-01 00:00:00\",\n \"2020-01-01 00:00:00\",\n None,\n 1,\n )\n ]\n\n\ndef test_delete(manager, pull_request):\n manager.insert(model=pull_request)\n\n assert len(manager.all()) == 1\n\n manager.delete(row_id=1)\n\n assert len(manager.all()) == 0\n","sub_path":"tests/datasource/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"73990894","text":"#!/usr/bin/env python3\n\n# -------------------------------\n# projects/netflix/TestNetflix.py\n# Copyright (C) 2015\n# Glenn P. Downing\n# -------------------------------\n\n# https://docs.python.org/3.4/reference/simple_stmts.html#grammar-token-assert_stmt\n\n# -------\n# imports\n# -------\n\nfrom io import StringIO\nfrom unittest import main, TestCase\n\nfrom Netflix import netflix_rmse, netflix_predict, netflix_print, netflix_solve\n\n# -----------\n# TestNetflix\n# -----------\n\nclass TestNetflix (TestCase) :\n\n # ----\n # rmse\n # ----\n\n def test_rmse_1 (self) :\n i = [1,1]\n v = netflix_rmse(i)\n self.assertEqual(v, 1)\n\n def test_eval_2 (self) :\n i = [2, 2, 2, 2, 2, 2]\n v = netflix_rmse(i)\n self.assertEqual(v, 2)\n\n\n \n # -----\n # predict\n # -----\n\n def test_predict_1 (self) :\n i = \"1\"\n j = 2\n movie_avg_rating = {\"1\":4,\"2\":3}\n user_avg_rating = {1: 3, 2: 4}\n offset = {\"1\": 0, \"2\": 0}\n r = netflix_predict(i, j, movie_avg_rating, user_avg_rating, offset)\n self.assertEqual(r, 4.0)\n\n def test_predict_2 (self) :\n i = \"3\"\n j = 2\n movie_avg_rating = {\"1\":4,\"2\":3}\n user_avg_rating = {1: 3, 2: 4}\n offset = {\"1\": 0, \"2\": 0}\n r = netflix_predict(i, j, movie_avg_rating, user_avg_rating, offset)\n self.assertEqual(r, 4.0)\n\n def test_predict_3 (self) :\n i = \"1\"\n j = 8\n movie_avg_rating = {\"1\":4,\"2\":3}\n user_avg_rating = {1: 3, 2: 4}\n offset = {\"1\": 0, \"2\": 0}\n r = netflix_predict(i, j, movie_avg_rating, user_avg_rating, offset)\n self.assertEqual(r, 4.0)\n\n def test_predict_4 (self) :\n i = \"1\"\n j = 2\n movie_avg_rating = {\"1\":4,\"2\":3}\n user_avg_rating = {1: 3, 2: 4}\n offset = {\"1\": 0, \"2\": .15}\n r = netflix_predict(i, j, movie_avg_rating, user_avg_rating, offset)\n self.assertEqual(r, 4.072)\n \n def test_predict_5 (self) :\n i = \"9\"\n j = 10\n movie_avg_rating = {\"1\":4,\"2\":3}\n user_avg_rating = {1: 3, 2: 4}\n offset = {\"1\": 0, \"2\": 0}\n r = netflix_predict(i, j, movie_avg_rating, user_avg_rating, offset)\n self.assertEqual(r, 3.7)\n # -----\n # print\n # -----\n\n def test_print_1 (self) :\n w = StringIO()\n ratings = ['1:',2.0,4.0,5.0,1.1]\n netflix_print(w, ratings)\n self.assertEqual(w.getvalue(), \"1:\\n2.0\\n4.0\\n5.0\\n1.1\\n\")\n\n # -----\n # solve\n # -----\n def test_solve (self) :\n r = StringIO(\"1:\\n30878\\n2647871\")\n w = StringIO()\n netflix_solve(r,w)\n self.assertEqual(w.getvalue(), \"1:\\n3.7\\n3.3\\nRMSE: 0.51\\n\")\n# ----\n# main\n# ----\n\nif __name__ == \"__main__\" : #pragma: no cover\n main() #pragma: no cover\n\n\"\"\"\n% coverage3 run --branch TestNetflix.py > TestNetflix.out 2>&1\n\n\n\n% coverage3 report -m >> TestNetflix.out\n\n\n\n% cat TestNetflix.out\n.......\n----------------------------------------------------------------------\nRan 7 tests in 0.001s\n\nOK\nName Stmts Miss Branch BrMiss 
Cover Missing\n---------------------------------------------------------\nNetflix 18 0 6 0 100%\nTestNetflix 33 1 2 1 94% 79\n---------------------------------------------------------\nTOTAL 51 1 8 1 97%\n\"\"\" #pragma: no cover\n","sub_path":"ad36457-TestNetflix.py","file_name":"ad36457-TestNetflix.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"222313748","text":"# -*- coding: utf-8 -*-\n\nfrom swagger_server.redis_operate.redis_unit import OperateRedis\nimport threading\nimport time\nimport datetime\nimport random\nimport json\nfrom enum import Enum\nfrom swagger_server.logs.log_unit import error_logs_http\n\nuser_code_info = dict()\n\n\nclass GroupOperate(Enum):\n NEW = 1\n UPDATE = 2\n DEL = 3\n\n\n# 加载 群组热数据\ndef group_member_hot_data_to_redis(group_id, group_enum, owner_id=0, group_member=list()):\n try:\n op_redis = OperateRedis()\n redis_pipeline = op_redis.pipeline()\n if group_enum == GroupOperate.NEW:\n if owner_id != 0:\n user_info = dict()\n if str(owner_id) in user_code_info.keys():\n user_info = user_code_info[str(owner_id)]\n tmp_data = {str(owner_id): user_info}\n redis_pipeline.sadd(\"group:\" + str(group_id), json.dumps(tmp_data))\n for member_id in group_member:\n user_info = dict()\n if member_id == 0:\n continue\n if str(member_id) in user_code_info.keys():\n user_info = user_code_info[str(member_id)]\n tmp_data = {str(member_id): user_info}\n redis_pipeline.sadd(\"group:\" + str(group_id), json.dumps(tmp_data))\n redis_pipeline.expire(\"group:\" + str(group_id), 86400)\n redis_pipeline.execute()\n elif group_enum == GroupOperate.UPDATE:\n redis_pipeline.delete(\"group:\" + str(group_id))\n user_info = dict()\n if owner_id != 0:\n if str(owner_id) in user_code_info.keys():\n user_info = user_code_info[str(owner_id)]\n tmp_data = {str(owner_id): user_info}\n redis_pipeline.sadd(\"group:\" + str(group_id), json.dumps(tmp_data))\n for member_id in group_member:\n user_info = dict()\n if member_id == 0:\n continue\n if str(member_id) in user_code_info.keys():\n user_info = user_code_info[str(member_id)]\n tmp_data = {str(member_id): user_info}\n redis_pipeline.sadd(\"group:\" + str(group_id), json.dumps(tmp_data))\n redis_pipeline.expire(\"group:\" + str(group_id), 86400)\n redis_pipeline.execute()\n elif group_enum == GroupOperate.DEL:\n redis_pipeline.delete(\"group:\" + str(group_id))\n redis_pipeline.execute()\n\n except Exception as err:\n error_logs_http(__name__, err, 'group_member_hot_data_to_redis')\n return 'group_member_hot_data_to_redis error'\n\n\n# 加载群组详细信息的热数据\ndef group_hot_data_to_redis(group_enum, group_info, group_id=0):\n try:\n if (group_enum == GroupOperate.NEW) or (group_enum == GroupOperate.UPDATE):\n data = dict()\n data['group_id'] = group_info[\"group_id\"]\n data['group_theme'] = group_info[\"group_theme\"]\n data['group_range'] = group_info[\"group_range\"]\n data['group_name'] = group_info[\"group_name\"]\n data['group_type'] = group_info[\"group_type\"]\n if \"owner_id\" in group_info.keys():\n data['owner_eid'] = group_info[\"owner_id\"]\n elif \"owner_eid\" in group_info.keys():\n data['owner_eid'] = group_info[\"owner_eid\"]\n data['create_time'] = str(group_info[\"create_time\"])\n data['invite_mode'] = group_info[\"invite_mode\"]\n data['admit_mode'] = group_info[\"admit_mode\"]\n data['proclamation'] = group_info[\"proclamation\"]\n if \"description\" in group_info.keys():\n data['description'] = group_info[\"description\"]\n elif 
\"describe\" in group_info.keys():\n data['description'] = group_info[\"describe\"]\n data_str = json.dumps(data)\n op_redis = OperateRedis()\n op_redis.set_record(key=\"group:info:\" + str(group_info[\"group_id\"]), value=data_str, expire=86400*2)\n elif group_enum == GroupOperate.DEL:\n op_redis = OperateRedis()\n op_redis.del_record(key=\"group:info:\" + str(group_id))\n except Exception as err:\n error_logs_http(__name__, err, 'group_hot_data_to_redis')\n return 'group_hot_data_to_redis error'\n\n\nclass UserCodeContainer(threading.Thread):\n def __init__(self, thread_name):\n threading.Thread.__init__(self)\n self._thread_name = str(thread_name)\n self.start()\n\n def run(self):\n print(\"UserInfo Thread: \" + self._thread_name)\n current_day = datetime.datetime.now().day\n op_redis = OperateRedis()\n data = op_redis.get_record(\"all_employee_info\")\n data_dict = json.loads(data.decode(\"utf-8\"))\n for item in data_dict:\n user_id = 0\n user_code = \"\"\n user_name = \"\"\n if \"username\" in item.keys():\n user_code = item['username']\n if 'employeeNo' in item.keys():\n user_id = item['employeeNo']\n if \"fullname\" in item.keys():\n user_name = item[\"fullname\"]\n\n user_code_info[str(user_id)] = {\"user_code\": user_code, \"user_name\": user_name}\n print(\"Group Info complete!\")\n while True:\n print(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n now_hour = datetime.datetime.now().hour\n if (now_hour == 19) and (current_day != datetime.datetime.now().day):\n sleep_seconds = random.randint(0, 300)\n time.sleep(sleep_seconds)\n op_redis = OperateRedis()\n data = op_redis.get_record(\"all_employee_info\")\n data_dict = json.loads(data.decode(\"utf-8\"))\n user_code_info.clear()\n for item in data_dict:\n user_id = 0\n user_code = \"\"\n user_name = \"\"\n if \"username\" in item.keys():\n user_code = item['username']\n if 'employeeNo' in item.keys():\n user_id = item['employeeNo']\n if \"fullname\" in item.keys():\n user_name = item[\"fullname\"]\n\n user_code_info[str(user_id)] = {\"user_code\": user_code, \"user_name\": user_name}\n current_day = datetime.datetime.now().day\n print(\"User Info update!\")\n else:\n time.sleep(300)\n\n\nif __name__ == \"__main__\":\n xx = UserCodeContainer()\n\n","sub_path":"python-flask-server/swagger_server/user_info_container/user_container.py","file_name":"user_container.py","file_ext":"py","file_size_in_byte":6587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"89124459","text":"import unittest\n\nfrom .token import Token\nfrom .tokenizer import Tokenizer, tokenize\n\n\nclass TestTokenizer(unittest.TestCase):\n \"\"\"\n Test the Tokenizer class.\n \"\"\"\n\n def test_create(self):\n \"\"\"\n Test creating a tokenizer.\n \"\"\"\n\n tk = Tokenizer(None)\n self.assertEqual(tk.get_token(), None)\n\n @staticmethod\n def convert_tokens(numbers):\n \"\"\"\n Convert a list of numbers to their respective Token values.\n \"\"\"\n\n return list(map(lambda t: Token.get_by_id(t), numbers))\n\n def test_tokenize_single_tokens(self):\n \"\"\"\n Test tokenizing single tokens.\n \"\"\"\n\n tokens = Token.reserved_words() + Token.special_symbols()\n for token in tokens:\n self.assertEqual(\n list(tokenize(token.value[1])),\n self.convert_tokens([token.value[0], 33]))\n\n def test_tokenize_integer_constants(self):\n \"\"\"\n Test tokenizing integer constants and retrieving their values.\n \"\"\"\n\n # sample numbers\n numbers = [0, 1, 5, 12, 26, 374, 142059492]\n\n # non-padded numbers\n for 
number in numbers:\n source = \"{0}\".format(number)\n tokens = [\n (Token.INTEGER_CONSTANT, number),\n (Token.EOF, None)]\n\n self.assertEqual(list(tokenize(source, values=True)), tokens)\n\n # padded numbers\n for number in numbers:\n source = \"{0:04d}\".format(number)\n tokens = [\n (Token.INTEGER_CONSTANT, number),\n (Token.EOF, None)]\n\n self.assertEqual(list(tokenize(source, values=True)), tokens)\n\n def test_tokenize_identifiers(self):\n \"\"\"\n Test tokenizing identifiers and retrieving their names.\n \"\"\"\n\n # sample identifiers\n identifiers = [\"NAME\", \"AGE\", \"P1\", \"P28\", \"P030\"]\n\n # identifier names\n for identifier in identifiers:\n source = identifier\n tokens = [\n (Token.IDENTIFIER, identifier),\n (Token.EOF, None)]\n\n self.assertEqual(list(tokenize(source, values=True)), tokens)\n\n def test_tokenize_samples(self):\n \"\"\"\n Test tokenizing sample inputs.\n \"\"\"\n\n # lab1 test01\n self.assertEqual(\n list(tokenize(\"program int X; begin X===328;XY74||\")),\n self.convert_tokens(\n [1, 4, 32, 12, 2, 32, 26, 14, 31, 12, 32, 19, 33]))\n\n # lab1 test02\n self.assertEqual(\n list(tokenize(\"program int X; begin X===328XY74||\")),\n self.convert_tokens(\n [1, 4, 32, 12, 2, 32, 26, 14, 34]))\n\n # lab1 test04\n self.assertEqual(\n list(tokenize(\"===XY74Z this remainder doesn't matter\")),\n self.convert_tokens([26, 14, 34]))\n\n # lab1 test05\n self.assertEqual(\n list(tokenize(\"||xy74 this remainder doesn't matter\")),\n self.convert_tokens([19, 34]))\n\n # lab1 test06\n self.assertEqual(\n list(tokenize(\"||74xy this remainder doesn't matter\")),\n self.convert_tokens([19, 34]))\n\n # lab1 test07\n self.assertEqual(\n list(tokenize(\";;XYxy this remainder doesn't matter\")),\n self.convert_tokens([12, 12, 34]))\n\n # lab1 test08\n self.assertEqual(\n list(tokenize(\";xyXY this remainder doesn't matter\")),\n self.convert_tokens([12, 34]))\n\n # lab1 test09\n self.assertEqual(\n list(tokenize(\";|this remainder doesn't matter\")),\n self.convert_tokens([12, 34]))\n\n # lab1 test10\n self.assertEqual(\n list(tokenize(\";%this remainder doesn't matter\")),\n self.convert_tokens([12, 34]))\n\n # lab2 test01\n source = \"\"\"\n program\n int X ;\n begin X = 25 ; write X ; end\n \"\"\"\n\n tokens = [1, 4,32, 12, 2, 32, 14, 31, 12, 11, 32, 12, 3, 33]\n self.assertEqual(list(tokenize(source)), self.convert_tokens(tokens))\n\n # lab2 test02\n source = \"\"\"\n program int ABC, D;\n\n begin read ABC; read D;\n\n while (ABC != D) loop\n if (ABC > D) then ABC = ABC - D;\n\n else D = D - ABC;\n\n end;\n\n end;\n\n write D;\n\n end\n \"\"\"\n\n tokens = [\n 1, 4, 32, 13, 32, 12, 2, 10, 32, 12, 10, 32, 12, 8, 20, 32, 25, 32,\n 21, 9, 5, 20, 32, 28, 32, 21, 6, 32, 14, 32, 23, 32, 12, 7, 32, 14,\n 32, 23, 32, 12, 3, 12, 3, 12, 11, 32, 12, 3, 33]\n\n self.assertEqual(list(tokenize(source)), self.convert_tokens(tokens))\n\n # lab2 test03\n source = \"\"\"\n program int X1, X4; int X2, X3, X7;\n\n begin X1=0; X2=1; X3=1; read X4;\n\n while (X1 < X4) loop\n\n X7=X2+X3; X2=X3;\n\n X3=X7; X1=X1+1;\n\n end ;\n\n write X2;\n\n end\n \"\"\"\n\n tokens = [\n 1, 4, 32, 13, 32, 12, 4, 32, 13, 32, 13, 32, 12, 2, 32, 14, 31, 12,\n 32, 14, 31, 12, 32, 14, 31, 12, 10, 32, 12, 8, 20, 32, 27, 32, 21,\n 9, 32, 14, 32, 22, 32, 12, 32, 14, 32, 12, 32, 14, 32, 12, 32, 14,\n 32, 22, 31, 12, 3, 12, 11, 32, 12, 3, 33]\n\n self.assertEqual(list(tokenize(source)), 
self.convert_tokens(tokens))\n","sub_path":"Core/cse3341/test_tokenizer.py","file_name":"test_tokenizer.py","file_ext":"py","file_size_in_byte":5672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"420263800","text":"# -*- coding: utf-8 -*-\n\nimport web\nimport json\nimport urllib\nfrom config import *\nfrom utils import *\nfrom session_manager import SessionManager\n\n\nclass Handler:\n def GET(self):\n params = web.input()\n\n if not SessionManager.instance().check_session(params.session_id, params.device_id, int(params.userid)):\n resp = {'res':401, 'msg':'登陆态异常'}\n return json.dumps(resp, ensure_ascii=False)\n\n data = {'device_id': params.device_id, 'userid': int(params.userid)}\n url = 'http://' + ACCOUNT_BACKEND + '/user_info?' + urllib.urlencode(data)\n\n r = http_request(url)\n if r.has_key('rtn') and r['rtn'] == 0:\n resp = {\n 'res': 0,\n 'msg': '',\n 'userid': int(r['userid']),\n 'phone': r['phone_num'],\n# 'nickname': r['nickname'],\n 'sex': int(r['sex']),\n 'age': int(r['age']),\n 'interest': r['interest']\n }\n else:\n resp = {'res': 1, 'msg': 'error'}\n\n return json.dumps(resp)\n\n","sub_path":"wangcai_svr/interface/src/req_account_info.py","file_name":"req_account_info.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"467488626","text":"# Untitled - By: Lei Zhao - 周五 11月 27 2020\r\n\r\nimport os, lcd, image, utime\r\n\r\nlcd.init()\r\nlcd.rotation(2)\r\nimg = image.Image()\r\nrecord_names = []\r\nrecord_ftrs = []\r\n\r\ndef loadInfo():\r\n # Load names info\r\n if \"names.txt\" in os.listdir():\r\n with open(\"names.txt\",'r') as f:\r\n record_names[:] = f.read().splitlines()\r\n # Load features info\r\n if \"ftrs.txt\" in os.listdir():\r\n with open(\"ftrs.txt\",'r') as f:\r\n record_ftrs[:] = f.read().split('\\n|||||\\n')\r\n record_ftrs.pop()\r\n\r\ndef printNames():\r\n for i in range(len(record_names)):\r\n print(i+1,':',record_names[i])\r\n\r\ndef showImageByID(image_id):\r\n x,y,w,h = 96,70,128,128\r\n img.clear()\r\n a = image.Image(\"/sd/image/\"+str(image_id)+\".jpg\")\r\n img.draw_image(a,(x,y))\r\n label = str(image_id)+\" \"+record_names[image_id-1]\r\n img.draw_string(x+2,y-30,label,scale=2)\r\n lcd.display(img)\r\n\r\ndef renameByID(image_id, new_name):\r\n record_names[image_id-1] = new_name\r\n showImageByID(image_id)\r\n with open(\"names.txt\",'w') as f:\r\n for name in record_names:\r\n f.write(name+'\\n')\r\n\r\ndef delInfoByID(image_id):\r\n # delete name\r\n del_name = record_names.pop(image_id-1)\r\n print('Delete info:', image_id, '.', del_name)\r\n with open(\"names.txt\",'w') as f:\r\n for name in record_names:\r\n f.write(name+'\\n')\r\n # delete face image\r\n os.remove('/sd/image/'+str(image_id)+\".jpg\")\r\n # delete feature\r\n\r\n\r\ndef tmpRenameAllImages():\r\n for i in range(len(record_names)):\r\n old_name = '/sd/image/'+str(i+1)+\".jpg\"\r\n new_name = '/sd/image/'+record_names[i]+\".jpg\"\r\n print(old_name, '->', new_name)\r\n os.rename(old_name, new_name)\r\n\r\ndef showImages():\r\n x,y,w,h = 96,70,128,128\r\n for i in range(len(record_names)):\r\n image_id = i+1\r\n name = record_names[i]\r\n img.clear()\r\n a = image.Image(\"/sd/image/\"+name+\".jpg\")\r\n img.draw_image(a,(x,y))\r\n label = str(image_id)+' '+record_names[i]\r\n img.draw_string(x+2,y-30,label,scale=2)\r\n lcd.display(img)\r\n 
utime.sleep_ms(1000)\r\n\r\n\r\n#print(os.listdir('/sd/image'))\r\nloadInfo()\r\nprintNames()\r\n#tmpRenameAllImages()\r\nshowImages()\r\n#showImageByID(10)\r\n#renameByID(10, 'Lei Zhao')\r\n","sub_path":"update_info.py","file_name":"update_info.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"587083217","text":"#!/usr/bin/python3\nimport matplotlib.pyplot as plt\nimport time\nfrom sklearn.datasets import load_iris\n\n\n#loading iris data\n\niris=load_iris()\n\n#print flower 
name\n\nfl_name=iris.target_names\nprint(fl_name)\n\n#print feature of iris\n\nfl_features=iris.feature_names\n\nprint(fl_features)\n\nfl_features_data=iris.data\n\n#loading flower name data\n\nfl_real_data=iris.target\n\nplt.xlabel(\"Setosa\")\nplt.ylabel(\"versicolor\")\nplt.title(\"IRIS FLOWER\")\n\nx1=fl_features_data[0:50]\n\nz1=fl_features_data[50:100]\nplt.scatter(x1,z1,label=\"Setosa\",marker=\"x\",c='g')\n\nplt.scatter(z1,x1,label=\"versicolor\",marker=\"*\",c='y')\n\nplt.legend()\nplt.show()\n\n\n","sub_path":"seves.py","file_name":"seves.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"519230515","text":"# hossz = 10\n# korongok = [True]*hossz\n# hosszok = [True, False, False] + [None]*(hossz-2)\nimport time\nfrom pprint import pprint\nimport random\n\ndef sor_kozeprol(max):\n \"\"\"\n Megadja a természetes számokat úgy, hogy középről kezdi és alternálva folytatja.\n Az elemek a [0, max) intervalumban vannak.\n \"\"\"\n szorzo = 1\n for i in range(1, max+1):\n yield max//2 + i//2*szorzo\n szorzo *= -1\n\n\nnyerok = {}\n\ndef kozeprol_kiszamol(lista):\n for h in lista:\n for j in sor_kozeprol(h):\n for vagh in [1, 2]:\n if j == h-1 and vagh == 2:\n continue\n ujlista = set(lista)\n ujlista.discard(h)\n if j == 0 or j == h-1:\n ujlista ^= {h-vagh}\n elif h > vagh:\n ujlista ^= {j}\n ujlista ^= {h-vagh-j}\n ujlista.discard(0)\n ujlista = tuple(sorted(ujlista))\n if ujlista not in nyerok:\n kozeprol_kiszamol(ujlista)\n\n # print(ujlista, nyerok[ujlista])\n\n if nyerok[ujlista]:\n nyerok[lista] = False\n return\n nyerok[lista] = True\n\ndef kiszamol(lista):\n for h in lista:\n for j in range(h):\n for vagh in [1, 2]:\n if j == h-1 and vagh == 2:\n continue\n ujlista = set(lista)\n ujlista.discard(h)\n if j == 0 or j == h-1:\n ujlista ^= {h-vagh}\n elif h > vagh:\n ujlista ^= {j}\n ujlista ^= {h-vagh-j}\n ujlista.discard(0)\n ujlista = tuple(sorted(ujlista))\n if ujlista not in nyerok:\n kiszamol(ujlista)\n\n # print(ujlista, nyerok[ujlista])\n\n if nyerok[ujlista]:\n nyerok[lista] = False\n return\n nyerok[lista] = True\n\n\ndef eltarolva_kiszamol(lista):\n for i, h in enumerate(lista):\n for j in range(h):\n for vagh in [1, 2]:\n if j == h-1 and vagh == 2:\n continue\n ujlista = list(lista)\n ujlista.pop(i)\n if j == 0 or j == h-1:\n ujlista.append(h-vagh)\n elif h > vagh:\n ujlista.append(j)\n ujlista.append(h-vagh-j)\n ujlista = [x for x in sorted(ujlista) if x != 0]\n if tuple(ujlista) not in nyerok:\n eltarolva_kiszamol(tuple(ujlista))\n # print(ujlista, nyerok[tuple(ujlista)])\n\n if nyerok[tuple(ujlista)]:\n nyerok[lista] = False\n return\n nyerok[lista] = True\n\ndef lassan_kiszamol(lista):\n for i, h in enumerate(lista):\n for j in range(h):\n for vagh in [1, 2]:\n if j == h-1 and vagh == 2:\n continue\n ujlista = lista[:]\n ujlista.pop(i)\n if j == 0 or j == h-1:\n ujlista.append(h-vagh)\n elif h > vagh:\n ujlista.append(j)\n ujlista.append(h-vagh-j)\n nyero = lassan_kiszamol(ujlista)\n # print(ujlista, nyero)\n \n if nyero:\n return False\n return True\n\n# elolrol_kiszamol((2, 2))\n# pprint(nyerok)\n# pprint(dict(filter(lambda x: x[1], nyerok.items())))\n\ndef tesztel(t1, t2):\n global nyerok\n o1 = o2 = 0\n for h in range(100):\n print(h)\n tesztadatok = [1, h] + [random.randint(1, h) for _ in range(random.randint(h//4, h//2))]\n tesztadatok.sort()\n tesztadatok = [tesztadatok[i+1]-tesztadatok[i] for i in range(len(tesztadatok)-1) if tesztadatok[i+1] != tesztadatok[i]]\n 
tesztadatok = tuple(sorted(set(tesztadatok)))\n        nyerok = {}\n        kezd = time.time()\n        t1(tesztadatok)\n        o1 += time.time()-kezd\n        print(time.time()-kezd, end=\", \")\n        n1 = dict(nyerok)\n        nyerok = {}\n        kezd = time.time()\n        t2(tesztadatok)\n        o2 += time.time()-kezd\n        print(time.time()-kezd)\n        print(f\"Total: {o1}, {o2}\")\n    n2 = nyerok\n    kozosek = n1.keys() & n2.keys()\n    jo = all([n1[k] == n2[k] for k in kozosek])\n    # print(jo, n1, n2)\n    if not jo:\n        return False\n    return True\n\nprint(tesztel(kiszamol, kozeprol_kiszamol))\n# kiszamol((4, 1))\n# print(nyerok)","sub_path":"Korongfordito/korongfordito.py","file_name":"korongfordito.py","file_ext":"py","file_size_in_byte":4610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"398207069","text":"# coding: utf8\n#\n#    Project: Time-Resolved EXAFS\n#             http://www.edna-site.org\n#\n#    Copyright (C) 2013 European Synchrotron Radiation Facility\n#                       Grenoble, France\n#\n#    Principal authors: Olof Svensson (svensson@esrf.fr)\n#\n#    This program is free software: you can redistribute it and/or modify\n#    it under the terms of the GNU General Public License as published by\n#    the Free Software Foundation, either version 3 of the License, or\n#    (at your option) any later version.\n#\n#    This program is distributed in the hope that it will be useful,\n#    but WITHOUT ANY WARRANTY; without even the implied warranty of\n#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n#    GNU General Public License for more details.\n#\n#    You should have received a copy of the GNU General Public License\n#    along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\n__author__ = \"Olof Svensson\"\n__contact__ = \"svensson@esrf.fr\"\n__license__ = \"GPLv3+\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\n\nimport os, h5py, datetime\n\nfrom EDPluginExec import EDPluginExec\nfrom EDUtilsArray import EDUtilsArray\n\nfrom XSDataCommon import XSDataString\nfrom XSDataCommon import XSDataFile\n\nfrom XSDataWriteNexusFilev1_0 import XSDataInputWriteNexusFile\nfrom XSDataWriteNexusFilev1_0 import XSDataResultWriteNexusFile\n\nclass EDPluginExecWriteNexusFilev1_0(EDPluginExec):\n    \"\"\"\n    This plugin writes a NeXus (HDF5) file with the data given as input.\n    \"\"\"\n\n    def __init__(self):\n        EDPluginExec.__init__(self)\n        self.setXSDataInputClass(XSDataInputWriteNexusFile)\n\n    def process(self, _edObject=None):\n        EDPluginExec.process(self)\n        self.DEBUG(\"EDPluginExecWriteNexusFilev1_0.process\")\n        xsDataInput = self.getDataInput()\n        # print xsDataInput.marshal()\n        fileName = str(xsDataInput.outputFileName.value)\n        if xsDataInput.outputFileDirectory is None:\n            fileDir = self.getWorkingDirectory()\n        else:\n            fileDir = str(xsDataInput.outputFileDirectory.value)\n        # timestamp = \"2010-10-18T17:17:04-0500\"\n        timestamp = datetime.datetime.now().isoformat()\n        instrument = str(xsDataInput.instrument.value)\n        # Create nexus file\n        nexusFile = self.makeFile(os.path.join(fileDir, fileName), file_name=fileName,\n                                  file_time=timestamp,\n                                  instrument=instrument,\n                                  creator=\"EDPluginExecWriteNexusFilev1_0\",\n                                  NeXus_version=\"4.3.0\",\n                                  HDF5_Version=h5py.version.hdf5_version,\n                                  h5py_version=h5py.version.version)\n        # Write main data\n        nxentry = self.makeGroup(nexusFile, \"Result\", \"NXEntry\")\n        for nexusGroup in xsDataInput.nexusGroup:\n            groupTitle = str(nexusGroup.title.value)\n            long_name = str(nexusGroup.long_name.value)\n            nxdata = self.makeGroup(nxentry, groupTitle, \"NXdata\", long_name=long_name, 
interpretation=\"spectrum\")\n # First add the axes - if any...\n listAxisNames = []\n for xsDataNexusAxis in nexusGroup.axis:\n numpyAxisArray = EDUtilsArray.xsDataToArray(xsDataNexusAxis.axisData)\n self.makeDataset(nxdata, \n str(xsDataNexusAxis.title.value), \n numpyAxisArray, \n axis=xsDataNexusAxis.axis.value,\n primary=xsDataNexusAxis.primary.value, \n units=str(xsDataNexusAxis.units.value), \n long_name=str(xsDataNexusAxis.long_name.value))\n listAxisNames.append(str(xsDataNexusAxis.title.value))\n numpyDataArray = EDUtilsArray.xsDataToArray(nexusGroup.data)\n strAxisNames = \"\"\n bFirst = True\n for strAxisName in listAxisNames:\n if bFirst:\n strAxisNames += strAxisName\n bFirst = False\n else:\n strAxisNames += \":\"+strAxisName\n self.makeDataset(nxdata, groupTitle, numpyDataArray.transpose(), \n signal='1', # Y axis of default plot\n axes=strAxisNames, # name of X and Y axes\n long_name=long_name)\n pass\n xsDataResult = XSDataResultWriteNexusFile()\n xsDataResult.outputFilePath = XSDataFile(XSDataString(os.path.join(fileDir, fileName)))\n self.setDataOutput(xsDataResult)\n \n def makeFile(self, filename, **attr):\n f = h5py.File(filename, \"w\")\n self.add_attributes(f, attr)\n return f\n \n def add_attributes(self, parent, attr):\n \"\"\"\n add attributes to an h5py data item\n \n :param obj parent: h5py parent object\n :param dict attr: dictionary of attributes\n \"\"\"\n if attr and type(attr) == type({}):\n # attr is a dictionary of attributes\n for k, v in attr.items():\n parent.attrs[k] = v\n \n def makeDataset(self, parent, name, data = None, **attr):\n if data == None:\n obj = parent.create_dataset(name)\n else:\n obj = parent.create_dataset(name, data=data)\n self.add_attributes(obj, attr)\n return obj \n \n def makeGroup(self, parent, name, nxclass, **attr):\n group = parent.create_group(name)\n group.attrs[\"NX_class\"] = nxclass\n self.add_attributes(group, attr)\n return group\n","sub_path":"trexafsv1/plugins/EDPluginExecWriteNexusFile-v1.0/plugins/EDPluginExecWriteNexusFilev1_0.py","file_name":"EDPluginExecWriteNexusFilev1_0.py","file_ext":"py","file_size_in_byte":5568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"302991110","text":"'''\nBayesian ResNet for CIFAR10.\n\nResNet architecture ref:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. 
arXiv:1512.03385\n'''\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\nfrom bayesian_torch.layers import QuantizedConv2dFlipout\nfrom bayesian_torch.layers import QuantizedLinearFlipout\nfrom torch.nn.quantized import BatchNorm2d as QuantizedBatchNorm2d\nfrom torch.nn import Identity\n\n__all__ = [\n 'QResNet', 'qresnet18', 'qresnet34', 'qresnet50', 'qresnet101', 'qresnet152'\n]\n\ndef _weights_init(m):\n classname = m.__class__.__name__\n if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):\n init.kaiming_normal_(m.weight)\n\n\nclass LambdaLayer(nn.Module):\n def __init__(self, lambd):\n super(LambdaLayer, self).__init__()\n self.lambd = lambd\n\n def forward(self, x):\n return self.lambd(x)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1, option='A', bias=False):\n super(BasicBlock, self).__init__()\n self.conv1 = QuantizedConv2dFlipout(\n in_channels=in_planes,\n out_channels=planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=bias)\n self.bn1 = QuantizedBatchNorm2d(planes)\n self.conv2 = QuantizedConv2dFlipout(\n in_channels=planes,\n out_channels=planes,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=bias)\n self.bn2 = QuantizedBatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != planes:\n if option == 'A':\n \"\"\"\n For CIFAR10 ResNet paper uses option A.\n \"\"\"\n self.shortcut = LambdaLayer(lambda x: F.pad(\n x[:, :, ::2, ::2],\n (0, 0, 0, 0, planes // 4, planes // 4), \"constant\", 0))\n elif option == 'B':\n self.shortcut = nn.Sequential(\n QuantizedConv2dFlipout(\n in_channels=in_planes,\n out_channels=self.expansion * planes,\n kernel_size=1,\n stride=stride,\n bias=bias), QuantizedBatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = F.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n sh = self.shortcut(x.contiguous()).contiguous()\n new_scale = max(out.q_scale(), sh.q_scale())\n out = torch.ops.quantized.add(out, sh, new_scale, 0)\n # out += self.shortcut(x)\n out = F.relu(out)\n return out\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, bias=False):\n super(Bottleneck, self).__init__()\n self.conv1 = QuantizedConv2dFlipout(\n in_channels=inplanes,\n out_channels=planes,\n kernel_size=1,\n bias=bias)\n self.bn1 =QuantizedBatchNorm2d(planes)\n self.conv2 = QuantizedConv2dFlipout(\n in_channels=planes,\n out_channels=planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=bias)\n self.bn2 = QuantizedBatchNorm2d(planes)\n self.conv3 = QuantizedConv2dFlipout(\n in_channels=planes,\n out_channels=planes * 4,\n kernel_size=1,\n bias=bias)\n self.bn3 = QuantizedBatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n # out += residual\n new_scale = max(out.q_scale(), residual.q_scale())\n out = torch.ops.quantized.add(out, residual, new_scale, 0)\n out = self.relu(out)\n\n return out\n\nclass QResNet(nn.Module):\n def __init__(self, block, layers, num_classes=1000, bias=False):\n super(QResNet, self).__init__()\n self.inplanes = 64\n 
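        # Stem follows the standard ImageNet ResNet layout (7x7 stride-2 conv,
        # BN, ReLU, 3x3 stride-2 max-pool), so e.g. a 224x224 input reaches the
        # residual stages as a 56x56x64 feature map, here with Flipout convs.
        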
self.conv1 = QuantizedConv2dFlipout(\n in_channels=3,\n out_channels=64,\n kernel_size=7,\n stride=2,\n padding=3,\n bias=bias)\n self.bn1 = QuantizedBatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], bias=bias)\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2, bias=bias)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2, bias=bias)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2, bias=bias)\n self.avgpool = nn.AvgPool2d(7, stride=1)\n self.fc = QuantizedLinearFlipout(\n in_features=512 * block.expansion,\n out_features=num_classes,\n )\n\n self.apply(_weights_init)\n\n def _make_layer(self, block, planes, blocks, stride=1, bias=False):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n QuantizedConv2dFlipout(in_channels=self.inplanes,\n out_channels=planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=bias),\n QuantizedBatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, bias=bias))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, bias=bias))\n\n return nn.Sequential(*layers)\n\n def quant_then_dequant(self, m, fuse_conv_bn=False): ## quantize only; need to rename this function\n for name, value in list(m._modules.items()):\n if m._modules[name]._modules:\n self.quant_then_dequant(m._modules[name], fuse_conv_bn=fuse_conv_bn)\n \n if \"QuantizedConv\" in m._modules[name].__class__.__name__:\n m._modules[name].quantize()\n m._modules[name].quantized_sigma_bias = None ### work around\n m._modules[name].dnn_to_bnn_flag = True ## since we don't compute kl in quantized models, this flag will be removed after refactoring\n\n if \"QuantizedLinear\" in m._modules[name].__class__.__name__:\n m._modules[name].quantize()\n m._modules[name].dnn_to_bnn_flag = True ## since we don't compute kl in quantized models, this flag will be removed after refactoring\n\n if fuse_conv_bn and \"BatchNorm2d\" in m._modules[name].__class__.__name__: # quite confusing, should be quantizedbatchnorm2d\n setattr(m, name, Identity())\n\n def forward(self, x):\n\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n for layer in self.layer1:\n x = layer(x)\n\n for layer in self.layer2:\n x = layer(x)\n\n for layer in self.layer3:\n x = layer(x)\n\n for layer in self.layer4:\n x = layer(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef qresnet18(pretrained=False, **kwargs):\n model = QResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model\n\n\ndef qresnet34(pretrained=False, **kwargs):\n model = QResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef qresnet50(pretrained=False, **kwargs):\n model = QResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef qresnet101(pretrained=False, **kwargs):\n model = QResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model\n\n\ndef qresnet152(pretrained=False, **kwargs):\n model = QResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model\n\n\n\ndef test(net):\n import numpy as np\n total_params = 0\n\n for x in filter(lambda p: p.requires_grad, net.parameters()):\n total_params += np.prod(x.data.numpy().shape)\n print(\"Total number of params\", total_params)\n print(\n \"Total layers\",\n len(\n list(\n 
filter(lambda p: p.requires_grad and len(p.data.size()) > 1,\n net.parameters()))))\n\n\nif __name__ == \"__main__\":\n for net_name in __all__:\n if net_name.startswith('qresnet'):\n print(net_name)\n test(globals()[net_name]())\n print()\n","sub_path":"bayesian_torch/models/bayesian/quantized_resnet_flipout_large.py","file_name":"quantized_resnet_flipout_large.py","file_ext":"py","file_size_in_byte":9072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"557533154","text":"from django.conf.urls import url\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n url(r'^$', views.index, name='index'),\r\n url(r'^client/$', views.client, name='client'),\r\n url(r'^client/new/$', views.new_client, name='new_client'),\r\n url(r'^client/all/$', views.all_client, name='all_client'),\r\n url(r'^client/all/(?P[0-9]+)$', views.id_client, name='id_client'),\r\n\r\n\r\n url(r'^task/$', views.task, name='task'),\r\n url(r'^send/$', views.send, name='send'),\r\n url(r'^send/new/$', views.new_ticket, name='new_ticket'),\r\n url(r'^alert/$', views.alert, name='alert'),\r\n]","sub_path":"crm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"97805630","text":"import os\r\nfrom sub_fasta import Sub_fasta\r\nimport argparse\r\n\r\nclass Ex_fasta(object):\r\n def __init__(self, path):\r\n self.path = path\r\n\r\n def ex_fasta(self, f_list):\r\n with open(f_list + '/diff_list.txt') as diff_list:\r\n with open(path + '/all.fasta') as a_fasta:\r\n with open(f_list + '/diff.fasta', 'w') as d_fasta:\r\n flag = 0\r\n id_list = [i.strip() for i in diff_list]\r\n # print(id_list)\r\n for line in a_fasta:\r\n if line.startswith('>'):\r\n name = line.strip()[1:]\r\n if name in id_list:\r\n flag = 1\r\n d_fasta.write('>' + name + '\\n')\r\n else:\r\n flag = 0\r\n else:\r\n if flag == 0:\r\n pass\r\n else:\r\n d_fasta.write(line)\r\n\r\n def main(self):\r\n file_list = Sub_fasta.get_file(self, path)[1]\r\n for file in file_list:\r\n f_list = path + '/' + file\r\n self.ex_fasta(f_list)\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='Extract fasta')\r\n parser.add_argument('-i', '--input', help='input the path of project', required=True)\r\n args = parser.parse_args()\r\n path = args.input\r\n ex_fa = Ex_fasta(path)\r\n ex_fa.main()\r\n","sub_path":"codefile/sub.py","file_name":"sub.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"80746701","text":"\"\"\"Tecan results upload epp functions.\"\"\"\n\nimport re\n\nfrom genologics.entities import Process\n\nfrom clarity_epp.upload.utils import txt_to_bool\n\n\ndef results_qc(lims, process_id):\n \"\"\"Upload tecan results to artifacts.\"\"\"\n process = Process(lims, id=process_id)\n concentration_range = map(float, re.findall('[\\d\\.]+', process.udf['Concentratiebereik (ng/ul)']))\n\n # Parse output file\n for output in process.all_outputs(unique=True):\n if output.name == 'Tecan Spark Output':\n tecan_result_file = output.files[0]\n tecan_file_order = ['Dx Fluorescentie (nM)', 'sample_name']\n tecan_file_part = -1\n\n measurements = {}\n sample_measurements = {}\n for line in lims.get_file_contents(tecan_result_file.id).data.split('\\n'):\n if not line.startswith('<>'):\n data = line.rstrip().split('\\t')\n for index, value in enumerate(data[1:]):\n value = 
value.rstrip()\n if value:\n coordinate = '{0}{1}'.format(data[0], str(index))\n if tecan_file_order[tecan_file_part] == 'Dx Fluorescentie (nM)':\n measurements[coordinate] = float(value)\n\n elif tecan_file_order[tecan_file_part] == 'sample_name':\n if value not in sample_measurements:\n sample_measurements[value] = [measurements[coordinate]]\n else:\n sample_measurements[value].append(measurements[coordinate])\n else:\n tecan_file_part += 1\n # Calculate linear regression for concentration\n # Assumes no std duplicates\n baseline_fluorescence = sample_measurements['Dx Tecan std 1'][0]\n fluorescence_values = [\n sample_measurements['Dx Tecan std 1'][0] - baseline_fluorescence,\n sample_measurements['Dx Tecan std 2'][0] - baseline_fluorescence,\n sample_measurements['Dx Tecan std 3'][0] - baseline_fluorescence,\n sample_measurements['Dx Tecan std 4'][0] - baseline_fluorescence,\n sample_measurements['Dx Tecan std 5'][0] - baseline_fluorescence,\n sample_measurements['Dx Tecan std 6'][0] - baseline_fluorescence,\n ]\n\n if process.udf['Reagentia kit'] == 'Quant-iT High-Sensitivity dsDNA kit':\n ng_values = [0, 5, 10, 20, 40, 60, 80, 100]\n fluorescence_values.append(sample_measurements['Dx Tecan std 7'][0] - baseline_fluorescence)\n fluorescence_values.append(sample_measurements['Dx Tecan std 8'][0] - baseline_fluorescence)\n elif process.udf['Reagentia kit'] == 'Quant-iT Broad Range dsDNA kit':\n ng_values = [0, 50, 100, 200, 400, 600]\n\n regression_slope = sum([x*y for x, y in zip(fluorescence_values, ng_values)]) / sum([x**2 for x in fluorescence_values])\n rsquared = 1 - (sum([(y - x*regression_slope)**2 for x, y in zip(fluorescence_values, ng_values)]) / sum([y**2 for y in ng_values]))\n\n # Set udf values\n process.udf['R-squared waarde'] = rsquared\n process.put()\n artifact_count = {}\n\n for artifact in process.all_outputs():\n if artifact.name not in ['Tecan Spark Output', 'Tecan Spark Samplesheet', 'check gemiddelde concentratie', 'Label plaat']:\n if len(artifact.samples) == 1: # Remove 'meet_id' from artifact name if artifact is not a pool\n artifact_name = artifact.name.split('_')[0]\n else:\n artifact_name = artifact.name\n\n # Set Average Concentratie fluorescentie\n sample_fluorescence = sum(sample_measurements[artifact_name]) / float(len(sample_measurements[artifact_name]))\n sample_concentration = ((sample_fluorescence - baseline_fluorescence) * regression_slope) / 2.0\n artifact.udf['Dx Concentratie fluorescentie (ng/ul)'] = sample_concentration\n\n # Set artifact Concentratie fluorescentie\n # Get artifact index == count\n if artifact_name not in artifact_count:\n artifact_count[artifact_name] = 0\n else:\n artifact_count[artifact_name] += 1\n\n artifact_fluorescence = sample_measurements[artifact_name][artifact_count[artifact_name]]\n artifact_concentration = ((artifact_fluorescence - baseline_fluorescence) * regression_slope) / 2.0\n artifact.udf['Dx Conc. goedgekeurde meting (ng/ul)'] = artifact_concentration\n\n # Set QC flags\n if artifact_name.startswith('Dx Tecan std'):\n artifact.qc_flag = 'PASSED'\n std_number = int(artifact_name.split(' ')[3])\n artifact.udf['Dx Conc. 
goedgekeurde meting (ng/ul)'] = ng_values[std_number - 1]\n artifact.udf['Dx Concentratie fluorescentie (ng/ul)'] = ng_values[std_number - 1]\n else:\n # Calculate measurement deviation from average.\n if concentration_range[0] <= sample_concentration <= concentration_range[1]:\n if len(sample_measurements[artifact_name]) == 1:\n artifact.qc_flag = 'PASSED'\n elif len(sample_measurements[artifact_name]) == 2:\n artifact_fluorescence_difference = abs(sample_measurements[artifact_name][0] - sample_measurements[artifact_name][1])\n artifact_fluorescence_deviation = artifact_fluorescence_difference / sample_fluorescence\n if artifact_fluorescence_deviation <= 0.1:\n artifact.qc_flag = 'PASSED'\n else:\n artifact.qc_flag = 'FAILED'\n else:\n artifact.qc_flag = 'FAILED'\n\n artifact.put()\n\n\ndef results_purify_normalise(lims, process_id):\n \"\"\"Upload tecan results to artifacts.\"\"\"\n process = Process(lims, id=process_id)\n\n # Find and parse Tecan Fluent 480 Output\n tecan_result = {}\n for result_file in process.result_files():\n if result_file.name == 'Tecan Fluent 480 Output':\n file_data = lims.get_file_contents(result_file.files[0].id).split('\\n')\n header = file_data[0].rstrip().split(';')\n for line in file_data[1:]:\n if line.rstrip():\n data = line.rstrip().split(';')\n tecan_result[data[header.index('SampleID')]] = {\n 'conc': float(data[header.index('Concentratie(ng/ul)')]),\n 'norm': txt_to_bool(data[header.index('Normalisatie')])\n }\n break # File found exit loop\n\n # Set concentration values on artifacts\n for artifact in process.analytes()[0]:\n sample = artifact.samples[0] # assume one sample per artifact\n artifact.udf['Dx Concentratie fluorescentie (ng/ul)'] = tecan_result[sample.udf['Dx Fractienummer']]['conc']\n artifact.udf['Dx QC status'] = tecan_result[sample.udf['Dx Fractienummer']]['norm']\n artifact.put()\n","sub_path":"clarity_epp/upload/tecan.py","file_name":"tecan.py","file_ext":"py","file_size_in_byte":7122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"228468700","text":"# Created by Partha at 11/17/19\n\nfrom selenium import webdriver\nfrom datetime import datetime\nfrom social.models import Post, Sentiment\nfrom crawler.models import Article\nimport time\nimport re\nimport json\nimport os\nfrom django.core.exceptions import FieldDoesNotExist, FieldError\nimport os\nfrom django.conf import settings\nfrom datetime import datetime, timedelta\nimport nltk\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\n\ndef daterange(start_date, end_date):\n for n in range(int((end_date - start_date).days)):\n yield start_date + timedelta(days=n)\n\n\ndef get_average_sentiment(corpus):\n sid = SentimentIntensityAnalyzer()\n article_sentiment = [0.0, 0.0, 0.0, 0.0] # neg, neu, pos, compound\n article_corpus_lines = corpus.split(\".\")\n for item in article_corpus_lines:\n temp = sid.polarity_scores(item)\n article_sentiment[0] += temp['neg']\n article_sentiment[1] += temp['neu']\n article_sentiment[2] += temp['pos']\n article_sentiment[3] += temp['compound']\n article_sentiment[0] = article_sentiment[0] / len(article_corpus_lines)\n article_sentiment[1] = article_sentiment[1] / len(article_corpus_lines)\n article_sentiment[2] = article_sentiment[2] / len(article_corpus_lines)\n article_sentiment[3] = article_sentiment[3] / len(article_corpus_lines)\n return article_sentiment\n\n\ndef sentiment_analysis():\n nltk.download('vader_lexicon')\n oldest_article = 
Article.objects.all().order_by('published_date')[0]\n oldest_post = Post.objects.all().order_by('published_date')[0]\n start_date = oldest_post.published_date.date() if oldest_post.published_date.date() >= oldest_article.published_date else oldest_article.published_date\n for date in daterange(start_date, datetime.now().date()):\n if not Sentiment.objects.filter(published_date=date).exists():\n\n article_corpus = \"\"\n for item in Article.objects.filter(published_date=date):\n article_corpus += item.body\n post_corpus = \"\"\n for item in Post.objects.filter(\n published_date__gte=datetime(year=date.year, month=date.month, day=date.day),\n published_date__lte=datetime(year=date.year, month=date.month, day=date.day, hour=23, minute=59,\n second=0)):\n post_corpus += item.body\n article_sentiment = get_average_sentiment(article_corpus)\n post_sentiment = get_average_sentiment(post_corpus)\n sentiment_object = Sentiment(published_date=date,\n article_neg=article_sentiment[0],\n article_neu=article_sentiment[1],\n article_pos=article_sentiment[2],\n article_compound=article_sentiment[3],\n post_neg=post_sentiment[0],\n post_neu=post_sentiment[1],\n post_pos=post_sentiment[2],\n post_compound=post_sentiment[3])\n sentiment_object.save()\n\n\ndef convert(value):\n if value:\n # determine multiplier\n multiplier = 1\n if value.endswith('K'):\n multiplier = 1000\n value = value[0:len(value) - 1] # strip multiplier character\n elif value.endswith('M'):\n multiplier = 1000000\n value = value[0:len(value) - 1] # strip multiplier character\n\n # convert value to float, multiply, then convert the result to int\n return int(float(value) * multiplier)\n\n else:\n return 0\n\n\ndef get_posts():\n if os.path.exists(\"facebook_config.json\"):\n\n config_file = json.load(open(\"facebook_config.json\", \"r\"))\n if 'email' in config_file.keys() and 'password' in config_file.keys() and \"lookup_link\" in config_file.keys():\n usr = config_file['email']\n pwd = config_file['password']\n lookup_link = config_file['lookup_link']\n else:\n print(\"Not enough parameters\")\n raise FieldError(\"Not enough parameters in config.json\")\n\n else:\n print(\"File does not exists.\")\n raise FieldDoesNotExist(\"Config file does not exist.\")\n option = webdriver.ChromeOptions()\n option.add_argument(\"--disable-geolocation\")\n option.add_argument(\"--disable-notifications\")\n option.add_argument(\"--headless\") # Runs Chrome in headless mode.\n option.add_argument('--no-sandbox') # # Bypass OS security model\n option.add_argument('start-maximized')\n option.add_argument('disable-infobars')\n option.add_argument(\"--disable-extensions\")\n option.add_argument(\" — incognito\")\n primary_browser_window = webdriver.Chrome(\n executable_path=os.path.join(settings.BASE_DIR, 'driver/chromedriver'),\n chrome_options=option)\n primary_browser_window.get(\"https://www.facebook.com/\")\n\n username_box = primary_browser_window.find_element_by_name('email')\n username_box.send_keys(usr)\n\n password_box = primary_browser_window.find_element_by_name('pass')\n password_box.send_keys(pwd)\n try:\n login_box = primary_browser_window.find_element_by_id('loginbutton')\n except Exception as ex:\n login_box = primary_browser_window.find_element_by_name('login')\n login_box.click()\n\n primary_browser_window.get(lookup_link)\n scroll_pause_time = 10\n loop_time = 10\n for i in range(0, loop_time):\n # Scroll down to bottom\n primary_browser_window.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n 
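        # Give the lazy-loaded feed time to render before the next scroll.
        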
time.sleep(scroll_pause_time)\n\n all_posts = primary_browser_window.find_elements_by_css_selector(\"[class='_1dwg _1w_m _q7o']\")\n all_comments = primary_browser_window.find_elements_by_css_selector(\"[class='_3hg- _42ft']\")\n all_shares = primary_browser_window.find_elements_by_css_selector(\"[class='_3rwx _42ft']\")\n all_reactions = primary_browser_window.find_elements_by_css_selector(\"[class='_3dlh _3dli']\")\n\n for post, comment, share, reaction in zip(all_posts, all_comments, all_shares, all_reactions):\n\n try:\n post_time = post.find_element_by_css_selector(\"[class='_5ptz timestamp livetimestamp']\")\n except Exception as ex:\n post_time = post.find_element_by_css_selector(\"[class='_5ptz']\")\n post_time_as_unix_timestamp = post_time.get_attribute('data-utime')\n post_time = datetime.utcfromtimestamp(int(post_time_as_unix_timestamp))\n\n try:\n post_text = post.find_element_by_css_selector(\"[data-testid='post_message']\").text\n except Exception as ex:\n post_text = \"\"\n post_content = post_text.replace(\"Provide translation to Bengali\", \"\")\n\n reaction_count = convert(re.findall(\"[0-9.]+K?\", reaction.text)[0])\n comment_count = convert(re.findall(\"[0-9.]+K?\", comment.text)[0])\n share_count = convert(re.findall(\"[0-9.]+K?\", share.text)[0])\n\n if not Post.objects.filter(body=post_content, published_date=post_time).exists():\n post = Post(body=post_content, reaction=reaction_count, comment=comment_count, share=share_count,\n published_date=post_time)\n post.save()\n\n else:\n\n Post.objects.filter(body=post_content, published_date=post_time).update(reaction=reaction_count,\n share=share_count,\n comment=comment_count)\n primary_browser_window.quit()\n\n\nif __name__ == '__main__':\n get_posts()\n","sub_path":"WebApp/social/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":7763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"56849620","text":"import sys\nfrom typing import TextIO\n\nfrom starlette.config import Config\nfrom starlette.datastructures import Secret\n\nfrom app.core.datastructures import DatabaseURL\n\nconfig = Config(\".env\")\n\n# general settings\nPROJECT_NAME: str = config(\"PROJECT_NAME\", cast=str, default=\"Ayashige\")\nVERSION: str = config(\"VERSION\", cast=str, default=\"0.1.0\")\nDESCRIPTION: str = config(\n \"DESCRIPTION\",\n cast=str,\n default=\"Ayashige provides a list of suspicious newly updated domains as a JSON feed\",\n)\n\nDEBUG: bool = config(\"DEBUG\", cast=bool, default=False)\nTESTING: bool = config(\"TESTING\", cast=bool, default=False)\n\n# log settings\nLOG_FILE: TextIO = config(\"LOG_FILE\", default=sys.stderr)\nLOG_LEVEL: str = config(\"LOG_LEVEL\", cast=str, default=\"DEBUG\")\nLOG_BACKTRACE: bool = config(\"LOG_BACKTRACE\", cast=bool, default=True)\n\n# Redis settings\nREDIS_URL: DatabaseURL = config(\n \"REDIS_URL\", cast=DatabaseURL, default=\"redis://localhost:6379\"\n)\n\n# ST settings\nSECURITYTRAILS_API_KEY = config(\"SECURITYTRAILS_API_KEY\", cast=Secret, default=\"\")\n","sub_path":"app/core/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"255647315","text":"\"\"\"\nGiven a string array words, find the maximum value of length(word[i]) * length(word[j]) where the two words do not share common letters. You may assume that each word will contain only lower case letters. 
If no such two words exist, return 0.\n\nExample 1:\nGiven [\"abcw\", \"baz\", \"foo\", \"bar\", \"xtfn\", \"abcdef\"]\nReturn 16\nThe two words can be \"abcw\", \"xtfn\".\n\nExample 2:\nGiven [\"a\", \"ab\", \"abc\", \"d\", \"cd\", \"bcd\", \"abcd\"]\nReturn 4\nThe two words can be \"ab\", \"cd\".\n\nExample 3:\nGiven [\"a\", \"aa\", \"aaa\", \"aaaa\"]\nReturn 0\nNo such pair of words.\n\"\"\"\nclass Solution(object):\n    def maxProduct(self, words):\n        \"\"\"\n        :type words: List[str]\n        :rtype: int\n        \"\"\"\n        max_product = 0\n        # In 'mask' only the first 26 bits of each int are used;\n        # a bit set to '1' means the corresponding letter is present,\n        # e.g. bit 0 corresponds to 'a', bit 1 corresponds to 'b'\n        mask = []\n        for word in words:\n            num = 0\n            for c in word:\n                num |= 1 << (ord(c)-ord('a'))\n            mask.append(num)\n        # find pairs without intersection\n        for i in range(len(words)):\n            for j in range(i+1, len(words)):\n                if mask[i] & mask[j] == 0:\n                    max_product = max(max_product,\n                                      len(words[i])*len(words[j]))\n        return max_product\n","sub_path":"maximum_product_of_word_lengths.py","file_name":"maximum_product_of_word_lengths.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"350745526","text":"teeth = [95,103,71,99,114,64,95,53,97,114,109,11,2,21,45,2,26,81,54,14,118,108,117,27,115,43,70,58,107]\n\n\ndef greedy_algorithm(amount):\n    list_of_coins = []\n    currencies = [20, 10, 5, 1]\n    for coins in currencies:\n        list_of_coins += [amount // coins]\n        amount = amount % coins\n    return list_of_coins\n\n\ndef toothfairy(teeth):\n    coins = []\n    for weight in teeth:\n        coins.append(greedy_algorithm(weight))\n    return coins\n\n\ndef beauty_print(teeth):\n    for i in range(len(teeth)):\n        print(f\"20: {teeth[i][0]}, 10: {teeth[i][1]}, 5: {teeth[i][2]}, 1: {teeth[i][3]}\")\n\n\nprint(toothfairy(teeth))\nbeauty_print(toothfairy(teeth))\n","sub_path":"Øving 6/Tannfeen/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"614311151","text":"# Playing-card calculations\nimport numpy as np\nimport random\n\npk = np.array([np.arange(13)+1 for i in range(4)])\n\na = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\nprint(random.sample(a, 1))\nfor i in range(4):\n    temp = random.sample(a, 1)\n","sub_path":"var/notes/python/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"554462006","text":"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport tensorflow.examples.tutorials.mnist.input_data as mnist\n\nfrom networks import generator, d_loss, g_loss, real_images, seed\n\n### hyperparameters\nLEFTOUT_RATE = 0.001\nBATCH_SIZE = 100\nEPOCHS = 100\n\n### trainer function\ntvars = tf.trainable_variables()\nd_vars = [var for var in tvars if \"discriminator\" in var.name]\ng_vars = [var for var in tvars if \"generator\" in var.name]\n\nd_trainer = tf.train.AdamOptimizer(LEFTOUT_RATE).minimize(d_loss, var_list=d_vars)\ng_trainer = tf.train.AdamOptimizer(LEFTOUT_RATE).minimize(g_loss, var_list=g_vars)\n\n### load training dataset\ntraining_set = mnist.read_data_sets(\"MNIST_data\")\nsamples = []\n\nwith tf.Session() as s:\n    s.run(tf.global_variables_initializer())\n    for epoch in range(EPOCHS):\n        batches = training_set.train.num_examples // BATCH_SIZE\n        for i in range(batches):\n            batch = 
training_set.train.next_batch(BATCH_SIZE)\n            batch_images = batch[0].reshape(BATCH_SIZE, 784)\n            batch_images = batch_images * 2 - 1\n            batch_seed = np.random.uniform(-1, 1, size=(BATCH_SIZE, 100))\n            _ = s.run(\n                d_trainer, feed_dict={real_images: batch_images, seed: batch_seed}\n            )\n            _ = s.run(g_trainer, feed_dict={seed: batch_seed})\n\n        print(\"on epoch {}\".format(epoch))\n\n        sample_seed = np.random.uniform(-1, 1, size=(1, 100))\n        gen_sample = s.run(generator(seed, reuse=True), feed_dict={seed: sample_seed})\n\n        samples.append(gen_sample)\n\nplt.imshow(samples[0].reshape(28, 28))\nplt.imshow(samples[99].reshape(28, 28))\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"505857392","text":"from individual_income_cn_2019 import count_tax_2019, min_tax_2019\nimport argparse\n\ndef bonus_min(args):\n    result = min_tax_2019(args.s, args.d)\n    print(f'Individual income tax due for the whole year: {result[0]}')\n    print(f'Monthly salary: {result[1]}')\n    print(f'Year-end bonus: {result[2]}')\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description='Individual income tax calculator')\n\n    subparsers = parser.add_subparsers()\n\n    parser_min = subparsers.add_parser('bonus_min')\n    parser_min.add_argument(\n        's', help='annual salary', type=float)\n    parser_min.add_argument(\n        '-d', help='monthly special deductions', type=float, default=0.0)\n    parser_min.set_defaults(func=bonus_min)\n\n    args = parser.parse_args('bonus_min 200000'.split())\n    args.func(args)\n    parser.parse_args(['-h'])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"435832170","text":"from flask import Flask, jsonify, request\nfrom flask_swagger import swagger\nfrom api import handler\nfrom api import db\n\ndef init_server():\n\n    app = Flask(__name__)\n\n    @app.route('/')\n    def index():\n        swag = swagger(app)\n        swag['info']['version'] = \"1.0\"\n        swag['info']['title'] = \"Network coverage endpoint\"\n        return jsonify(swag)\n\n    @app.route('/api/v1/search')\n    def search():\n\n        query = request.args.get('q')\n\n        if query is not None:\n            city = handler.retrieve_city(query)\n            return jsonify(db.search(city)), 200\n        else:\n            return jsonify('you can search network coverage by providing a query string ?q=xxxxx'), 200\n\n    return app\n","sub_path":"api/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"170663568","text":"import pygame\nfrom pygame.locals import *\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nimport numpy as np\n\n\nfrom cube import cube\n\ncubes = []\nfor _ in range(10):\n    temp = []\n    for _ in range(10):\n        temp.append(cube())\n    cubes.append(temp)\n\n\ncordX = 0\ncordY = 0\nfor i in range(10):\n    for j in range(10):\n        cubes[i][j].add_to_x(cordX)\n        cubes[i][j].add_to_y(cordY)\n        cordX += 2\n    cordX = 0\n    cordY += 2\n\n\n# pygame setup\npygame.init()\ndisplay = (800, 600)\npygame.display.set_mode(display, DOUBLEBUF|OPENGL)\n\n# camera setup\ngluPerspective(45, (display[0]/display[1]), 0.1, 70.0)\nglTranslatef(0.0, 0.0, -40)\n\n\n# frame loop\nwhile True:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            pygame.quit()\n            quit()\n    glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n\n    glRotatef(1, 0, 1, 0)\n\n    for i in range(10):\n        for j in range(10):\n            cubes[i][j].draw()\n\n    
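    # Swap the double-buffered frame, then idle 10 ms before the next pass,
    # which roughly caps the loop below 100 updates per second.
    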
pygame.display.flip()\n pygame.time.wait(10)\n\n","sub_path":"00-modules/external_modules/opengl/4/lots_of_cubes.py","file_name":"lots_of_cubes.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"180750981","text":"# https://www.hackerearth.com/practice/algorithms/sorting/quick-sort/tutorial/\ninputs = iter([\n \"5\",\n \"4 3 1 5 2\"\n])\n\nimport random\n\ndef next_line():\n return next(inputs)\n\ndef swap(arr, j, k):\n arr[j], arr[k] = arr[k], arr[j]\n\ndef partition(arr, start, end):\n i = start+1\n piv = arr[start]\n for j in range(start+1, end+1):\n if arr[j] < piv:\n swap(arr, i, j)\n i += 1\n swap(arr, start, i-1)\n return i-1\n\ndef rand_partition(arr, start, end):\n p = random.randint(start, end)\n swap(arr, start, p)\n return partition(arr, start, end)\n\ndef quick_sort(arr, start, end):\n if start < end:\n piv = rand_partition(arr, start, end)\n quick_sort(arr, start, piv-1)\n quick_sort(arr, piv+1, end)\n\nn = int(next_line())\na = [int(x) for x in next_line().split(\" \")]\nquick_sort(a, 0, n-1)\nfor j in a:\n print(j, end=\" \")\n","sub_path":"algorithms/quick-sort.py","file_name":"quick-sort.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"43578681","text":"from Parser import parse\nfrom OutputClass import OutputClass\n\nparametri, corse = parse('./input/c_no_hurry.in')\nnCars = int(parametri[2])\ntMax = int(parametri[5])\n\ndef getDeadline(ride):\n return int(ride[2][1])\n\ndef calcolaDistanza(inizio, arrivo):\n if type(arrivo[0]) == int:\n return abs(arrivo[0]-int(inizio[0])) + abs(arrivo[1]-int(inizio[1]))\n else:\n return abs(int(arrivo[0]) - int(inizio[0])) + abs(int(arrivo[1]) - int(inizio[1]))\n\ncorse.sort(key=getDeadline)\n\ncars = [[0,0] for i in range(nCars)]\nout = OutputClass('hurry_out.txt')\nfor c in range(nCars):\n print(\"macchina\", c)\n i = 0\n ta = 0\n a = 0\n while i < len(corse) and ta < tMax:\n tfc = int(corse[i][2][1])\n tic = int(corse[i][2][0])\n distInizio = calcolaDistanza(corse[i][0], cars[c])\n distFine = calcolaDistanza(corse[i][0], corse[i][1])\n if tfc - max(tic, ta) > distInizio + distFine:\n a+=1\n print(str(c), str(corse[i][3]))\n print('accettate', a)\n out.addCar(str(c), str(corse[i][3]))\n ta += distInizio + distFine\n corse.remove(corse[i])\n i -= 1\n i += 1\n if a == 0:\n break\n\nout.creaFileOutput()\n\nwith open('hurry_out.txt','a') as file:\n for r in range(nCars-(c)):\n file.write('0\\n')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"231382375","text":"\"\"\"\nObtain module dependencies for Cradlepoint SDK\n\"\"\"\n\nimport os.path\n\n# from make import EXIT_CODE_MISSING_DEP\n\n\nclass BuildDependencyList(object):\n\n # these are modules on Cradlepoint FW with Python 3.3 (/usr/lib/python3.3)\n COMMON_STD_MODULES = [\n \"OpenSSL\", \"__future__\", \"abc\", \"argparse\", \"base64\", \"bisect\",\n \"calendar\", \"cgi\", \"chunk\", \"cmd\", \"code\", \"codecs\", \"codeop\",\n \"collections\", \"configparser\", \"contextlib\", \"copy\", \"copyreg\",\n \"ctypes\", \"datetime\", \"dateutil\", \"difflib\", \"dnslib\", \"dnsproxy\",\n \"dummy_threading\", \"email\", \"encodings\", \"fnmatch\", \"functools\",\n \"getopt\", \"gettext\", \"glob\", \"gzip\", \"hashlib\", \"heapq\", \"hmac\",\n \"html\", 
\"http\", \"importlib\", \"io\", \"ipaddress\", \"json\", \"keyword\",\n \"linecache\", \"locale\", \"logging\", \"lzma\", \"mailbox\", \"mimetypes\",\n \"numbers\", \"os\", \"pickle\", \"pkgutil\", \"platform\", \"pprint\",\n \"py_compile\", \"pyrad\", \"queue\", \"quopri\", \"random\", \"re\", \"reprlib\",\n \"runpy\", \"serial\", \"shlex\", \"smtplib\", \"socket\", \"socketserver\",\n \"sre_compile\", \"sre_constants\", \"sre_parse\", \"ssl\", \"stat\", \"string\",\n \"stringprep\", \"struct\", \"subprocess\", \"tarfile\", \"telnetlib\",\n \"textwrap\", \"threading\", \"token\", \"tokenize\", \"traceback\", \"tty\",\n \"types\", \"urllib\", \"uu\", \"uuid\", \"weakref\", \"xml\",\n\n # these exist on router, but you probably should not be using!\n # are either Cradlepoint-specific, or obsolete, or depend on STDIO\n # access you lack; shutil & tempfile in this list because large file\n # ops on router flash is risky\n \"_compat_pickle\", \"_pyio\", \"_strptime\", \"_weakrefset\", \"bdb\",\n \"compileall\", \"cProfile\", \"cp\", \"cpsite\", \"dis\", \"genericpath\",\n \"imp\", \"inspect\", \"lib-dynload\", \"opcode\", \"pdb\",\n \"posixpath\", \"shutil\", \"ssh\", \"tempfile\", \"tornado\", \"warnings\",\n\n # exist, but not in /usr/lib/python3.3? builtin?\n # maybe inside cradlepoint.cpython-33m?\n \"binascii\", \"errno\", \"fcntl\", \"ioctl\", \"gc\", \"math\",\n \"pydoc\", \"select\", \"sys\", \"time\"\n ]\n\n # others? _ssh.cpython-33m.so, cradlepoint.cpython-33m.so\n\n # these are used in sys.platform != CP router only\n COMMON_PIP = [\n \"requests\", \"requests.auth\", \"requests.exceptions\"\n ]\n\n def __init__(self):\n\n # # load the CP sample built-in list, will be of form: {\n # # \"cp_lib.clean_ini\": [],\n # # \"cp_lib.cp_logging\": [\"cp_lib.hw_status\",\n # \"cp_lib.load_settings\"],\n # # }\n # json_name = os.path.join(\"tools\", \"module_dependency.json\")\n # file_han = open(json_name, \"r\")\n # self._cp_lib_details = json.load(file_han)\n # file_han.close()\n\n self.dep_list = []\n\n self.ignore_pip = False\n\n self.logger = None\n\n return\n\n def add_file_dependency(self, file_name=None):\n \"\"\"\n Given a single file which ends in .py, scan for import lines.\n\n :param str file_name:\n :return:\n \"\"\"\n # self.logger.debug(\"add_file_dependency({0})\".format(file_name))\n\n if not isinstance(file_name, str):\n raise TypeError\n\n if not os.path.exists(file_name):\n raise FileNotFoundError(\n \"module_dependency: file({}) doesn't exist.\".format(file_name))\n\n if not os.path.isfile(file_name):\n raise FileNotFoundError(\n \"module_dependency: file({}) doesn't exist.\".format(file_name))\n\n value = os.path.splitext(file_name)\n # should be like ('network\\\\tcp_echo\\\\tcp_echo', '.py') or\n # ('network\\\\tcp_echo', '')\n # self.logger.debug(\"value({})\".format(value))\n if value[1] != \".py\":\n # self.logger.debug(\n # \"module_dependency: file({}) is not PYTHON (.py)normal file.\")\n return None\n\n # at this point, the file should be a .PY file at least\n file_han = open(file_name)\n for line in file_han:\n offset = line.find(\"import\")\n if offset >= 0:\n # then we found a line\n tokens = line.split()\n\n if len(tokens) >= 2 and tokens[0] == \"import\":\n # then like \"import os.path\" or \"import os, sys, socket\"\n\n if tokens[1][-1] != \",\":\n # then is like \"import os.path\"\n self.logger.debug(\"add_file_dep:{}\".format(tokens[1]))\n self.add_if_new(tokens[1])\n\n else: # like \"import os, sys, socket\"\n for name in tokens[1:]:\n 
self.logger.debug(\"token({})\".format(name))\n if name[-1] == ',':\n value = name[:-1]\n else:\n value = name\n self.add_if_new(value)\n\n elif len(tokens) >= 4 and tokens[0] == \"from\" and \\\n tokens[2] == \"import\":\n # then is like \"from cp_lib.cp_self.logger import\n # get_recommended_logger\"\n self.add_if_new(tokens[1])\n\n file_han.close()\n\n # self.logger.debug(\"module_dependency: {}\".format(self.dep_list))\n return self.dep_list\n\n def add_if_new(self, new_name):\n \"\"\"\n Given new module, see if already known or to be skipped\n\n :param str new_name: the gathering list of names\n :return int: return count of names added\n \"\"\"\n # self.logger.debug(\"add_if_new({0})\".format(new_name))\n\n if new_name in self.COMMON_STD_MODULES:\n # scan through existing STD LIB like \"self.logger\", \"sys\", \"time\"\n # self.logger.debug(\"Mod({}) is in std lib.\".format(new_name))\n return 0\n\n # if self.ignore_pip and new_name in self.COMMON_PIP:\n if new_name in self.COMMON_PIP:\n # scan through existing STD LIB like \"requests\"\n self.logger.debug(\"Mod({}) is in PIP lib.\".format(new_name))\n return 0\n\n # handle importing sub modules, like os.path or self.logger.handlers\n if new_name.find('.') >= 0:\n # then we have a x.y\n name = new_name.split('.')\n if name[0] in self.COMMON_STD_MODULES:\n # self.logger.debug(\"Mod({}) is in std lib.\".format(new_name))\n return 0\n\n if new_name in self.dep_list:\n # scan through existing names\n self.logger.debug(\"Mod({}) already known.\".format(new_name))\n return 0\n\n # if still here, then is a new name\n self.logger.debug(\"Mod({}) is NEW!\".format(new_name))\n\n # convert from network.tcp_echo.ftplib to network/tcp_echo/ftplib\n path_name = new_name.replace('.', os.sep)\n\n added_count = 0\n if not os.path.isdir(path_name):\n # only ADD is not a subdirectory\n self.dep_list.append(new_name)\n added_count = 1\n\n # handle is file or sub-directory\n self.logger.info(\"_add_recurse:{} {}\".format(path_name, new_name))\n added_count += self._add_recurse(path_name, new_name)\n\n return added_count\n\n def _add_recurse(self, path_name, dot_name):\n \"\"\"\n Assume new_name is like \"network/tcp_echo/xmlrpc/\" or\n \"network/tcp_echo/ftplib.py\"\n\n :param str path_name: the path name, like \"network/tcp_echo/xmlrpc\"\n :param str dot_name: the dot name, like \"network.tcp_echo.xmlrpc\"\n :return int: return if files were added\n \"\"\"\n # self.logger.debug(\n # \"_add_recurse({0},{1})\".format(path_name, dot_name))\n\n added_count = 0\n if os.path.isdir(path_name):\n # then is module, such as xmlrpc, with includes:\n # network/tcp_echo/xmlrpc/__init__.py\n # network/tcp_echo/xmlrpc/client.py\n # network/tcp_echo/xmlrpc/server.py\n self.logger.debug(\"Recurse into directory ({})\".format(path_name))\n\n dir_list = os.listdir(path_name)\n for name in dir_list:\n if name == \"__pycache__\":\n self.logger.debug(\n \" skip known skipper ({})\".format(name))\n continue\n\n if name == \"test\":\n self.logger.debug(\n \" skip known skipper ({})\".format(name))\n continue\n\n if name[0] == \".\":\n self.logger.debug(\n \" skip pattern skipper ({})\".format(name))\n continue\n\n # still here, see if file or subdirectory\n file_name = os.path.join(path_name, name)\n if os.path.isdir(file_name):\n # then another sub-directory\n added_count += self._add_recurse(\n file_name, dot_name + '.' 
+ name)\n\n else: # assume is a file?\n # for example, name=client.py\n if name.endswith(\".py\"):\n self.dep_list.append(file_name)\n added_count += 1\n try:\n self.logger.debug(\n \"Recurse into s-file ({})\".format(file_name))\n self.add_file_dependency(file_name)\n\n except FileNotFoundError:\n self.logger.error(\n \"Could NOT find above dependency within\" +\n \"({})\".format(file_name))\n # sys.exit(EXIT_CODE_MISSING_DEP)\n\n else:\n # expects network.tcp_echo.xmlrpc.something.txt\n value = path_name + os.sep + name\n self.logger.debug(\n \"Add file as dependency({})\".format(value))\n self.dep_list.append(value)\n added_count += 1\n\n else:\n # might be file, like network/tcp_echo/ftplib.py as\n # network.tcp_echo.ftplib\n if not path_name.endswith(\".py\"):\n path_name += \".py\"\n self.logger.debug(\"Recurse into d-file ({})\".format(path_name))\n self.add_file_dependency(path_name)\n\n return added_count\n","sub_path":"tools/module_dependency.py","file_name":"module_dependency.py","file_ext":"py","file_size_in_byte":10484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"239740852","text":"import torch \nimport numpy as np\n\nfrom torch.utils.data import Dataset\n\nfrom which_device import device\n\ndef trim_psf(psf, slen): \n \n # trims the psf array\n assert len(psf.shape) == 2\n assert psf.shape[0] == psf.shape[1]\n \n # size of psf should be odd\n assert ((psf.shape[0] - 1) % 2) == 0\n \n psf_slen = psf.shape[-1]\n psf_center = (psf_slen - 1) / 2\n \n r = np.floor(slen / 2)\n l_indx = int(psf_center - r)\n u_indx = int(psf_center + r + 1)\n\n return psf[l_indx:u_indx, l_indx:u_indx]\n\n\nclass CenteredStarsData(Dataset):\n\n def __init__(self, \n psf, \n n_images = 60000, \n log10_flux_range = [3, 5.], \n background = 800):\n \n self.psf = psf\n self.slen = psf.shape[0]\n assert psf.shape[1] == psf.shape[0]\n \n self.n_images = n_images\n \n # range for log10(flux)\n self.lflux_max = log10_flux_range[1]\n self.lflux_min = log10_flux_range[0]\n assert self.lflux_max >= self.lflux_min\n \n # sky background \n self.background = background\n \n def __len__(self): \n return self.n_images\n \n def __getitem__(self, indx): \n \n # uniform draw \n u = torch.rand(size = (1,), device = device)\n \n # sample log flux \n log10_flux = u * (self.lflux_max - self.lflux_min) + self.lflux_min\n \n # get flux \n flux = 10**log10_flux\n \n # construct image \n image = self.psf * flux + self.background\n image += torch.sqrt(image) * torch.randn(size = image.shape, device = device)\n \n # encoder expects a band: \n image = image.view(1, self.slen, self.slen)\n \n return {'image': image, \n 'flux': flux}","sub_path":"case_studies/simple_flux_experiment/utils/dataset_lib.py","file_name":"dataset_lib.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"86768269","text":"#!/usr/bin/env python3\n\nimport os\nimport argparse\nimport subprocess\nimport sys\n\n\nclass resultMaker:\n def __init__(self, testFileDir, outputFile):\n self.outputFile = outputFile\n self.testFileDir = testFileDir\n self.numberOfIterations = 1\n self.graphTypes = [\"vg\", \"hg\", \"pg\", \"xg\", \"og\"]\n self.testTypes = [\"convert\", \"serialize\", \"deserialize\", \"access\"]\n\n def runFiles(self):\n with open(self.outputFile, \"w\") as outputFile:\n for fileName in os.listdir(self.testFileDir):\n if not fileName.endswith(\".gfa\"):\n continue\n\n print(\"testing on \" + 
fileName, file = sys.stderr)\n for graphType in self.graphTypes:\n print(\"\\ttesting graph type \" + graphType, file = sys.stderr)\n for i in range(0,self.numberOfIterations):\n \n # construct from GFA and serialize\n constructErr, graphFile = self.getStatistics(\"serialize\", graphType, self.testFileDir, fileName, True)\n # load from serialized, but do nothing\n loadErr, dummy = self.getStatistics(\"deserialize\", graphType, self.testFileDir, graphFile)\n # load from serliazed and time accesses to graph features\n accessErr, dummy = self.getStatistics(\"access\", graphType, self.testFileDir, graphFile)\n \n # parse the stderr output\n constructStats = self.parseData(constructErr)\n loadStats = self.parseData(loadErr) \n accessStats = self.parseData(accessErr) \n \n # match (roughly) the format that Emily's plotting script expects\n \n row = [fileName, \"construct\", graphType, constructStats[\"realTime\"], constructStats[\"usrTime\"], \n constructStats[\"sysTime\"], constructStats[\"memoryUsage\"], \"NA\", \"NA\"]\n print(\"\\t\".join(str(val) for val in row), file = outputFile)\n \n row = [fileName, \"deserialize\", graphType, loadStats[\"realTime\"], loadStats[\"usrTime\"], \n loadStats[\"sysTime\"], loadStats[\"memoryUsage\"], \"NA\", \"NA\"]\n print(\"\\t\".join(str(val) for val in row), file = outputFile)\n \n for accessType in [\"nodes\", \"edges\", \"paths\"]:\n numItems, accessTime = accessStats[accessType]\n row = [fileName, accessType, graphType, accessStats[\"realTime\"], accessStats[\"usrTime\"], \n accessStats[\"sysTime\"], accessStats[\"memoryUsage\"], numItems, accessTime]\n print(\"\\t\".join(str(val) for val in row), file = outputFile)\n \n # clean up the graph we made\n os.remove(os.path.join(self.testFileDir, graphFile))\n\n def parseTime(self, timeStr):\n tokens = timeStr.split(\":\")\n secs = 0.0\n for i in range(len(tokens)):\n secs += 60**i * float(tokens[len(tokens) - 1 - i])\n return secs\n\n def getStatistics(self, testType, graphType, directory, file, serialize=False):\n \n assert(graphType in self.graphTypes)\n assert(testType in self.testTypes)\n \n #print(testType, graphType, directory, file, serialize)\n #print(\"/usr/bin/time\",\"-v\",\"./bin/eval\", testType, graphType, os.path.join(directory,file))\n \n outName = None\n \n cmd = [\"/usr/bin/time\", \"-v\", \"./bin/eval\", testType, graphType, os.path.join(directory,file)] \n \n if serialize:\n \n outName = os.path.basename(file) + \".\" + graphType\n \n with open(os.path.join(directory, outName), \"w\") as outFile:\n p = subprocess.Popen(cmd, stdout=outFile, stderr=subprocess.PIPE, encoding='utf8')\n out, err = p.communicate()\n else:\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8')\n out, err = p.communicate()\n \n if p.returncode != 0:\n print(\"Command failed: \" + \" \".join(cmd), file = sys.stderr)\n print(\"stderr:\\n\" + err, file = sys.stderr)\n assert(False)\n\n return err, outName\n\n\n def parseData(self, rawStats):\n \n stats = {}\n \n gen = (line for line in str(rawStats).split(\"\\n\"))\n for line in gen:\n line = line.strip().lower()\n if line.startswith(\"elapsed (wall clock)\"):\n stats[\"realTime\"] = self.parseTime(line.split()[-1])\n elif line.startswith(\"user time\"):\n stats[\"usrTime\"] = self.parseTime(line.split()[-1])\n elif line.startswith(\"system time\"):\n stats[\"sysTime\"] = self.parseTime(line.split()[-1])\n elif line.startswith(\"maximum resident set size\"):\n stats[\"memoryUsage\"] = float(line.split()[-1])\n elif 
line.startswith(\"number of\"):\n tokens = line.split()\n accessType = tokens[2]\n numberItems = int(tokens[-1])\n # the next line contains the actual time it took\n accessTime = float(next(gen).strip().split()[-1])\n \n stats[accessType] = (numberItems, accessTime)\n\n return stats\n\n\n\n\ndef argParser():\n parser=argparse.ArgumentParser(add_help=True)\n parser.add_argument(\"--outputFile\",\"-o\",\n type=str,\n help=\"specify the file name of the output file\")\n parser.add_argument(\"--testFileDir\",\"-i\",\n type=str,\n help=\"specify the directory name of the input files\")\n\n\n return vars(parser.parse_args())\n\ndef main():\n args = argParser()\n myResultMaker = resultMaker(args[\"testFileDir\"],args[\"outputFile\"])\n myResultMaker.runFiles()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"dataCollection.py","file_name":"dataCollection.py","file_ext":"py","file_size_in_byte":6238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"164152","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 20 13:57:24 2017\n\n@author: hashemk\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport time\nimport matplotlib.pyplot as plt\nfrom categorical_encoder import CategoricalEncoder\nfrom data_frame_selector import DataFrameSelector\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.pipeline import FeatureUnion\n\nfrom sklearn import decomposition\nfrom sklearn.manifold import TSNE\nfrom sklearn.preprocessing import StandardScaler\n#from sklearn.preprocessing import LabelEncoder\n#from sklearn.preprocessing import MinMaxScaler\nfrom ggplot import *\n\n\n\ndef app():\n print('Welcome to dimension reductionality')\n \n \ndef do_pca(df, n_comp=None,\n num_values=['IC50_MUT', 'RANK_MUT', 'IC50_WT', 'RANK_WT'], cat_values=['EPITOPE_LENGTH'],\n label_values=['RANKED']):\n \n df = df.copy()\n \n num_pipeline = Pipeline([\n ('selector', DataFrameSelector(num_values)),\n ('std_scaler', StandardScaler())\n ])\n \n cat_pipeline = Pipeline([\n ('selector', DataFrameSelector(cat_values)),\n ('cat_encoder', CategoricalEncoder(encoding=\"onehot-dense\"))\n ])\n \n full_pipeline = FeatureUnion(transformer_list=[\n (\"num_pipeline\", num_pipeline),\n (\"cat_pipeline\", cat_pipeline),\n ])\n \n \n X = full_pipeline.fit_transform(df)\n y = df[label_values].astype(np.str)\n \n \n ## PCA\n if n_comp is None:\n pca = decomposition.PCA(n_components=0.95)\n else:\n pca = decomposition.PCA(n_components=n_comp)\n X_pca = pca.fit_transform(X)\n print('Explained variation per principal component: {}'.format(pca.explained_variance_ratio_))\n\n var_exp = pca.explained_variance_ratio_\n cumsum_var = np.cumsum(var_exp)\n \n n_p = len(cumsum_var)\n plt.bar(np.arange(n_p) +1, cumsum_var)\n plt.xlabel('PCAs')\n plt.ylabel('Fraction of explained variation')\n \n \n df['pca-one'] = X[:, 0]\n df['pca-two'] = X[:, 1]\n df['output_var'] = y.astype(np.str)\n \n \n chart = ggplot( df, aes(x='pca-one', y='pca-two', color='output_var') ) \\\n + geom_point(size=75,alpha=0.4) \\\n + ggtitle(\"First and Second Principal Components\")\n print(chart)\n \n return(X_pca)\n\n\ndef do_tsne(df, n_comp=2, perpl=30, n_iter=300,\n num_values=['IC50_MUT', 'RANK_MUT', 'IC50_WT', 'RANK_WT'], \n cat_values=['EPITOPE_LENGTH'],\n label_values=['RANKED']):\n tsne_df = df.copy()\n \n num_pipeline = Pipeline([\n ('selector', DataFrameSelector(num_values)),\n ('std_scaler', StandardScaler())\n ])\n \n cat_pipeline = Pipeline([\n ('selector', 
DataFrameSelector(cat_values)),\n        ('cat_encoder', CategoricalEncoder(encoding=\"onehot-dense\"))\n    ])\n    \n    full_pipeline = FeatureUnion(transformer_list=[\n        (\"num_pipeline\", num_pipeline),\n        (\"cat_pipeline\", cat_pipeline),\n    ])\n    \n    ## make your data \n    X = full_pipeline.fit_transform(tsne_df)\n    y = df[label_values].astype(np.str)\n    \n    time_start = time.time()\n    tsne = TSNE(n_components=n_comp, verbose=1, perplexity=perpl, n_iter=n_iter)\n    tsne_results = tsne.fit_transform(X)\n    print('t-SNE done! Time elapsed: {} seconds'.format(time.time()-time_start))\n    \n    \n    \n    tsne_df['x-tsne'] = tsne_results[:,0]\n    tsne_df['y-tsne'] = tsne_results[:,1]\n    tsne_df['output_var'] = y.astype(np.str)\n\n    chart = ggplot(tsne_df, aes(x='x-tsne', y='y-tsne', color='output_var') ) \\\n            + geom_point(size=70,alpha=0.7) \\\n            + ggtitle(\"tSNE dimensions colored by digit\")\n    print(chart)\n    \n    \n\n\nif __name__ == '__main__':\n    app()","sub_path":"pyEnzo/pyNeoantigen/dimension_reductionality.py","file_name":"dimension_reductionality.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"651639550","text":"Input = list(map(int,input().split()))\nn, pos, l, r = Input[0], Input[1], Input[2], Input[3]\nif l == 1 and r != n:\n    print(abs(r - pos) + 1)\nelif r == n and l != 1:\n    print(abs(pos - l) + 1)\nelif l == 1 and r == n:\n    print('0')\nelse:\n    print(min(abs(pos - l), abs(r - pos)) + r - l + 2)","sub_path":"CodeForces/browser.py","file_name":"browser.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"146307335","text":"import requests\nfrom bs4 import BeautifulSoup\n# import BeautifulSoup\n\ndef spider(max_pages):\n# set the maximum number of pages to crawl.\n\n    page = 1\n# start from page 1\n\n    while page < max_pages:\n\n        url = 'http://creativeworks.tistory.com/' + str(page)\n\n        source_code = requests.get(url)\n\n        plain_text = source_code.text\n        # up to this step we already have the data, but it is hard to read.\n\n        soup = BeautifulSoup(plain_text, 'lxml')\n        # so we use BeautifulSoup to make it more readable\n        # lxml is said to be much faster than Python's built-in parser.\n\n        for link in soup.select('h2 > a'):\n\n            href = \"http://creativeworks.tistory.com\" + link.get('href')\n            # extract the href value from the HTML, join it with the base URL, then store it in the variable href.\n            title = link.string\n\n            print(href)\n            print(title)\n\n        page += 1\n\nspider(10)\n# crawls up to page 9 (the loop stops before max_pages)","sub_path":"파이썬으로_크롤링/BeautifulSoup로_크롤링하는_법.py","file_name":"BeautifulSoup로_크롤링하는_법.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"553132886","text":"class Node:\n    def __init__(self, value):\n        self.value = value\n        self.next = None\n\n\nclass LinkedList:\n    def __init__(self, head=None):\n        self.head = head\n\n    def append(self, new_node):\n        \"\"\" Appends new node to end of linked list, O(n) \"\"\"\n        current = self.head\n        if self.head:\n            while current.next:\n                current = current.next\n            current.next = new_node\n        else:\n            self.head = new_node\n\n    def get_position(self, position):\n        \"\"\" Gets node from a particular position, O(n) \"\"\"\n        if position == 1:\n            return self.head\n\n        if not self.head or position < 1:\n            return None\n\n        current = self.head\n        while position > 1 and current.next:\n            current = current.next\n            position -= 1\n        if position == 1:\n            return current\n        else:\n            return None\n\n    def insert(self, new_node, position):\n        \"\"\" Inserts new node at the given position, O(n) \"\"\"\n        if position == 
1:\n            old_head = self.head\n            self.head = new_node\n            self.head.next = old_head\n        elif self.head and position > 1:\n            current = self.head\n            while current.next and position > 2:\n                current = current.next\n                position -= 1\n            if position == 2:\n                new_node.next = current.next\n                current.next = new_node\n\n    def delete(self, value):\n        \"\"\" Deletes first node with a given value, O(n) \"\"\"\n        if self.head:\n            if self.head.value == value:\n                self.head = self.head.next\n            else:\n                current = self.head\n                while current.next:\n                    if current.next.value == value:\n                        current.next = current.next.next  # unlink the first matching node\n                        break\n                    current = current.next\n\n    def insert_first(self, new_node):\n        \"\"\" Inserts new node at head of linked list, O(1) \"\"\"\n        new_node.next = self.head\n        self.head = new_node\n\n    def delete_first(self):\n        \"\"\" Removed and returns head of linked list, O(1) \"\"\"\n        if not self.head:\n            return None\n\n        old_head = self.head\n        self.head = self.head.next\n        return old_head\n\n\nclass Stack:\n    def __init__(self, top=None):\n        self.ll = LinkedList(top)\n\n    def push(self, new_node):\n        \"\"\" Pushes new node onto stack, O(1) \"\"\"\n        self.ll.insert_first(new_node)\n\n    def pop(self):\n        \"\"\" Pops first node off of stack, O(1) \"\"\"\n        return self.ll.delete_first()\n\n    def peek(self):\n        \"\"\" Returns first node of stack, O(1) \"\"\"\n        return self.ll.get_position(1)\n\n\nclass Queue:\n    def __init__(self, head=None):\n        self.ll = LinkedList(head)\n\n    def enqueue(self, new_node):\n        \"\"\" Inserts new node onto tail of queue, O(n) \"\"\"\n        self.ll.append(new_node)\n\n    def peek(self):\n        \"\"\" Returns node at head of queue, O(1) \"\"\"\n        return self.ll.get_position(1)\n\n    def dequeue(self):\n        \"\"\" Removed and returns node at head, O(1) \"\"\"\n        return self.ll.delete_first()\n\n# Test cases\n# Set up some Nodes\ne1 = Node(1)\ne2 = Node(2)\ne3 = Node(3)\ne4 = Node(4)\n\n# Start setting up a LinkedList\nll = LinkedList(e1)\nll.append(e2)\nll.append(e3)\n\n# Test get_position\n# Should print 3\nprint(ll.head.next.next.value)\n# Should also print 3\nprint(ll.get_position(3).value)\n\n# # Test insert\nll.insert(e4, 3)\n# # Should print 4 now\nprint(ll.get_position(3).value)\n\n# # Test delete\nll.delete(1)\n# # Should print 2 now\nprint(ll.get_position(1).value)\n# # Should print 4 now\nprint(ll.get_position(2).value)\n# # Should print 3 now\nprint(ll.get_position(3).value)\n\n# Set up some Nodes\ne1 = Node(1)\ne2 = Node(2)\ne3 = Node(3)\ne4 = Node(4)\n\n# Start setting up a Stack\nstack = Stack(e1)\n\n# Test stack functionality\nstack.push(e2)\nstack.push(e3)\nprint(stack.pop().value)\nprint(stack.pop().value)\nprint(stack.pop().value)\nprint(stack.pop())\nstack.push(e4)\nprint(stack.pop().value)\n\n# Setup\nq = Queue(Node(1))\nq.enqueue(Node(2))\nq.enqueue(Node(3))\n\n# Test peek\n# Should be 1\nprint(q.peek().value)\n\n# Test dequeue\n# Should be 1\nprint(q.dequeue().value)\n\n# # Test enqueue\nq.enqueue(Node(4))\n# # Should be 2\nprint(q.dequeue().value)\n# # Should be 3\nprint(q.dequeue().value)\n# # Should be 4\nprint(q.dequeue().value)\nq.enqueue(Node(5))\n# # Should be 5\nprint(q.peek().value)\n","sub_path":"Lists/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":4375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"153415054","text":"import os\nimport xarray as xr\nfrom natsort import natsorted\nimport glob\nimport pandas as pd\nimport numpy as np\nimport cv2\nimport itertools\n\ndef open_minian(dpath, fname='minian', backend='zarr', chunks=None):\n    \"\"\"\n    Opens minian 
outputs.\n\n    Parameters\n    ---\n    dpath: str, path to folder containing the minian outputs folder.\n    fname: str, name of the minian output folder.\n    backend: str, 'zarr' or 'netcdf'. 'netcdf' seems outdated.\n    chunks: ??\n    \"\"\"\n    if backend == 'netcdf':\n        fname = fname + '.nc'\n        mpath = os.path.join(dpath, fname)\n        with xr.open_dataset(mpath) as ds:\n            dims = ds.dims\n            chunks = dict([(d, 'auto') for d in dims])\n        ds = xr.open_dataset(os.path.join(dpath, fname), chunks=chunks)\n\n        return ds\n\n    elif backend == 'zarr':\n        mpath = os.path.join(dpath, fname)\n        dslist = [xr.open_zarr(os.path.join(mpath, d))\n                  for d in os.listdir(mpath)\n                  if os.path.isdir(os.path.join(mpath, d))]\n        ds = xr.merge(dslist)\n        if chunks == 'auto':\n            chunks = dict([(d, 'auto') for d in ds.dims])\n\n        return ds.chunk(chunks)\n\n    else:\n        raise NotImplementedError(\"backend {} not supported\".format(backend))\n\n\ndef concat_avis(path, pattern='behavCam*.avi',\n                fname='Merged.avi', fps=30, isColor=True):\n    \"\"\"\n    Concatenates behavioral avi files for ezTrack.\n\n    Parameters\n    ---\n    path: str, path to folder containing avis. All avis will be merged.\n    pattern: str, pattern of video clips.\n    fname: str, file name of final merged clip.\n    fps: int, sampling rate.\n    isColor: bool, flag for writing color.\n\n    Return\n    ---\n    final_clip_name: str, full file name of final clip.\n    \"\"\"\n    # Get all files.\n    files = natsorted(glob.glob(os.path.join(path, pattern)))\n\n    # Get width and height.\n    cap = cv2.VideoCapture(files[0])\n    size = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), \\\n           int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n    # Define writer.\n    fourcc = 0\n    final_clip_name = os.path.join(path, fname)\n    writer = cv2.VideoWriter(final_clip_name, fourcc,\n                             fps, size, isColor=isColor)\n\n    for file in files:\n        print(f'Processing {file}')\n        cap = cv2.VideoCapture(file)\n        cap.set(1,0)                # Go to frame 0.\n        cap_max = int(cap.get(7))   #7 is the index for total frames.\n\n        # Loop through all the frames.\n        for frame_num in range(cap_max):\n            ret, frame = cap.read()\n            if ret:\n                # Convert to grayscale if specified.\n                if not isColor:\n                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n                writer.write(frame)\n            else:\n                break\n\n        cap.release()\n\n    writer.release()\n    print(f'Writing {final_clip_name}')\n\n    return final_clip_name\n\n\ndef read_eztrack(csv_fname, cm_per_pixel=1):\n    \"\"\"\n    Reads ezTrack outputs.\n\n    Parameters\n    ---\n    csv_fname: str, path to tracking .csv from ezTrack.\n    cm_per_pixel: float, centimeters per pixel.\n\n    Return\n    ---\n    position: dict, with keys x, y, frame, distance.\n    \"\"\"\n    # Open file.\n    df = pd.read_csv(csv_fname[0])\n\n    # Consolidate into dict.\n    position = {'x': np.asarray(df['X']) / cm_per_pixel,        # x position\n                'y': np.asarray(df['Y']) / cm_per_pixel,        # y position\n                'frame': np.asarray(df['Frame']),               # Frame number\n                'distance': np.asarray(df['Distance']) / cm_per_pixel}  # Distance traveled since last sample\n\n    return position\n\n\ndef synchronize_time_series(position, neural, behav_fps=30, neural_fps=15):\n    \"\"\"\n    Synchronizes behavior and neural time series by interpolating behavior.\n\n    :parameters\n    ---\n    position: dict, output from read_ezTrack().\n    neural: (neuron, t) array, any time series output from minian (e.g., C, S).\n    behav_fps: float, sampling rate of behavior video.\n    neural_fps: float, sampling rate of minian data.\n\n    :return\n    ---\n    position: dict, interpolated data based on neural sampling rate.\n\n    \"\"\"\n    # Get number of frames in each video.\n    neural_nframes = neural.shape[1]\n    behav_nframes = 
len(position['frame'])\n\n # Create time vectors.\n neural_t = np.arange(0, neural_nframes/neural_fps, 1/neural_fps)\n behav_t = np.arange(0, behav_nframes/behav_fps, 1/behav_fps)\n\n # Interpolate.\n position['x'] = np.interp(neural_t, behav_t, position['x'])\n position['y'] = np.interp(neural_t, behav_t, position['y'])\n position['frame'] = np.interp(neural_t, behav_t, position['frame'])\n\n # Normalize.\n position['x'] = position['x'] - min(position['x'])\n position['y'] = position['y'] - min(position['y'])\n\n # Compute distance at each consecutive point.\n pos_diff = np.diff(position['x']), np.diff(position['y'])\n position['distance'] = np.hypot(pos_diff[0], pos_diff[1])\n\n # Compute velocity by dividing by 1/fps.\n position['velocity'] = \\\n np.concatenate(([0], position['distance']*min((neural_fps, behav_fps))))\n\n return position\n\n\ndef get_transient_timestamps(neural_data, std_thresh=3):\n \"\"\"\n Converts an array of continuous time series (e.g., traces or S)\n into lists of timestamps where activity exceeds some threshold.\n\n :parameters\n ---\n neural_data: (neuron, time) array\n Neural time series, (e.g., C or S).\n\n std_thresh: float\n Number of standard deviations above the mean to define threshold.\n\n :returns\n ---\n event_times: list of length neuron\n Each entry in the list contains the timestamps of a neuron's\n activity.\n\n event_mags: list of length neuron\n Event magnitudes.\n\n \"\"\"\n # Compute thresholds for each neuron.\n stds = np.std(neural_data, axis=1)\n means = np.mean(neural_data, axis=1)\n thresh = means + std_thresh*stds\n\n # Get event times and magnitudes.\n bool_arr = neural_data > np.tile(thresh,[neural_data.shape[1], 1]).T\n\n event_times = [np.where(neuron > t)[0] for neuron, t\n in zip(neural_data, thresh)]\n\n event_mags = [neuron[neuron > t] for neuron, t\n in zip(neural_data, thresh)]\n\n return event_times, event_mags, bool_arr\n\n\ndef distinct_colors(n):\n def MidSort(lst):\n if len(lst) <= 1:\n return lst\n i = int(len(lst) / 2)\n ret = [lst.pop(i)]\n left = MidSort(lst[0:i])\n right = MidSort(lst[i:])\n interleaved = [item for items in itertools.zip_longest(left, right)\n for item in items if item != None]\n ret.extend(interleaved)\n return ret\n\n\n # Build list of points on a line (0 to 255) to use as color 'ticks'\n max_ = 255\n segs = int(n ** (1 / 3))\n step = int(max_ / segs)\n p = [(i * step) for i in np.arange(1, segs)]\n points = [0, max_]\n points.extend(MidSort(p))\n\n # Not efficient!!! 
Iterate over higher valued 'ticks' first (the points\n    # at the front of the list) to vary all colors and not focus on one channel.\n    colors = [\"#%02X%02X%02X\" % (points[0], points[0], points[0])]\n    r = 0\n    total = 1\n    while total < n and r < len(points):\n        r += 1\n        for c0 in range(r):\n            for c1 in range(r):\n                for c2 in range(r):\n                    if total >= n:\n                        break\n                    c = \"#%02X%02X%02X\" % (points[c0], points[c1], points[c2])\n                    if c not in colors and c != '#FFFFFF':\n                        colors.append(c)\n                        total += 1\n\n    return colors\n\n\ndef ordered_unique(sequence):\n    seen = set()\n    seen_add = seen.add\n\n    return [x for x in sequence if not (x in seen or seen_add(x))]\n\n\n\n\n\n\nif __name__ == '__main__':\n    import matplotlib.pyplot as plt\n    path = r'D:\\Projects\\GTime\\Data\\G123\\2\\H14_M46_S20'\n    #behav_path = os.path.join(path, 'Behavior', 'Merged_tracked.csv')\n\n    minian = open_minian(path)\n    S = np.asarray(minian.S)\n\n    get_transient_timestamps(S)\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":8080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"186512868","text":"import requests\nimport re\n\nfrom requests.exceptions import RequestException\n\n\ndef fileo(name, contains):\n    file = open(name, \"w\", encoding=\"utf-8\")\n    file.truncate()\n    file.write(contains)\n    file.close()\n\n\n# ================ request-sending function ================== #\ndef get_one_page(url, headers=None):  # open the page and handle request errors\n    try:\n        if headers:\n            response = requests.get(url, headers=headers)\n        else:\n            response = requests.get(url)\n        if response.status_code == 200:\n            response.encoding = 'utf-8'\n            return response.text\n        return None\n    except RequestException:\n        return None\n\n\n# ================ request headers ================== #\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:68.0) Gecko/20100101 Firefox/68.0'}\n\n# ================ URL with placeholders ================== #\n# url = 'https://kyfw.12306.cn/otn/leftTicket/init?linktypeid=dc&fs={src},AOH&ts={dst},NJH&date={date}&flag=N,N,Y'\nurl = 'https://kyfw.12306.cn/otn/leftTicket/init?linktypeid=dc&fs={0},AOH&ts={1},NJH&date={2}&flag=N,N,Y'\n# ================ build the URL string ================== #\nsrc = \"\"\ndst = \"\"\ndate = \"\"\n\nsrc = \"上海虹桥\"\ndst = \"南京\"\ndate = \"2019-09-05\"\n\n# url.replace(\"{src}\", src)\n# url.replace(\"{dst}\", dst)\n# url.replace(\"{date}\", date)\nurl = url.format(src, dst, date)\n\n# ================ send the request ================== #\n\n# html = get_one_page(url, headers)\nhtml = get_one_page(url)\nfileo(\"out\", html)\nprint(html)\nprint(url)\n","sub_path":"crawlers/linesCrawler.py","file_name":"linesCrawler.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"50679687","text":"from flask import Flask\nfrom flask import render_template, request, redirect, url_for\nimport json\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n    if request.args:\n        f = open('data.json', 'r', encoding='utf-8')\n        massive = json.load(f)\n        f.close()\n        dict = {'language': request.args['language'], 'answer1': request.args['answer1'], 'answer2': request.args['answer2']}\n        massive.append(dict)\n        w = open('data.json', 'w', encoding='utf-8')\n        json.dump(massive, w)\n        w.close()\n    return render_template('anketa1.html')\n\n@app.route('/json')\ndef json_file():\n    f = open('data.json', 'r', encoding='utf-8')\n    slovar=json.load(f)\n    f.close()\n    return render_template('answer2.html', dictionary=slovar)\n\n@app.route('/stats')\ndef 
stats():\n    z = open('data.json', 'r', encoding='utf-8')\n    slovar = []\n    sh = 0\n    pl = 0\n    ko = 0\n    ka = 0\n    for elem in json.load(z):\n        slovar.append(elem['answer1'])\n        slovar.append(elem['answer2'])\n    for i in slovar:\n        if i == 'шарф':\n            sh += 1\n        if i == 'платок':\n            pl += 1\n        if i == 'косынка':\n            ko += 1\n        if i == 'кашне':\n            ka += 1\n    all_answers = sh + pl\n    z.close()\n    return render_template('answer.html', sharf=sh, platok=pl, kosinka=ko, kashne=ka, all_answers=all_answers)\n\n@app.route('/search')\ndef search():\n    return render_template('search.html')\n\n@app.route('/result')\ndef result():\n    z = open('data.json', 'r', encoding='utf-8')\n    slovar = []\n    sh = 0\n    pl = 0\n    ko = 0\n    ka = 0\n    word = 0\n    people = 0\n    for elem in json.load(z):\n        slovar.append(elem['answer1'])\n        slovar.append(elem['answer2'])\n    for i in slovar:\n        if i == 'шарф':\n            sh += 1\n        if i == 'платок':\n            pl += 1\n        if i == 'косынка':\n            ko += 1\n        if i == 'кашне':\n            ka += 1\n    if request.args:\n        word = request.args['word']\n        if word == 'шарф':\n            people = sh\n        if word == 'платок':\n            people = pl\n        if word == 'косынка':\n            people = ko\n        if word == 'кашне':\n            people = ka\n    z.close()\n    return render_template('result.html', people=people, word=word)\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"questionnaire/Project2.py","file_name":"Project2.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"86294052","text":"class TrieNode(object):\n    def __init__(self):\n        self.child = dict()\n        self.flag = False\n        \nclass WordDictionary(object):\n    def __init__(self):\n        \"\"\"\n        initialize your data structure here.\n        \"\"\"\n        self.root = TrieNode()\n\n    def addWord(self, word):\n        \"\"\"\n        Adds a word into the data structure.\n        :type word: str\n        :rtype: void\n        \"\"\"\n        cur = self.root\n        for i in word:\n            if i not in cur.child:\n                cur.child[i] = TrieNode()\n            cur = cur.child[i]\n        cur.flag = True\n        \n\n    def search(self, word):\n        \"\"\"\n        Returns if the word is in the data structure. A word could\n        contain the dot character '.' to represent any one letter.\n        :type word: str\n        :rtype: bool\n        \"\"\"\n        # index = 0\n        # cur = self.root\n        # stack = [[cur,index]]\n        # while stack:\n        #     cur, index = stack.pop()\n        #     if index >= len(word): continue\n        #     if word[index] in cur.child:\n        #         if index == len(word) - 1 and cur.child[word[index]].flag: return True\n        #         stack.append([cur.child[word[index]],index+1])\n        #     elif word[index] == '.':\n        #         if index == len(word) - 1:\n        #             for i in cur.child:\n        #                 if cur.child[i].flag: return True\n        #         for i in cur.child:\n        #             stack.append([cur.child[i],index+1])\n        # return False\n\n        # stack = [self.root]\n        # for i in word:\n        #     level = []\n        #     while stack:\n        #         temp = stack.pop()\n        #         if i in temp.child:\n        #             level+=temp.child[i],\n        #         if i == '.':\n        #             level+=temp.child.values()\n        #     if not level: return False\n        #     stack = level\n        # return any(i.flag for i in stack)\n\n        # recursion\n        # children = list(string.ascii_lowercase) if word[0] == '.' 
else [word[0]]\n        # for child in children:\n        #     if child in node and self.search(word[1:], node[child]): return True\n        # return False\n        def f(i, root):\n            if i == len(word): return root.flag\n            if word[i] in root.child:\n                return f(i+1, root.child[word[i]])\n            elif word[i] == '.':\n                return any(f(i+1, j) for j in root.child.values())\n            else:\n                return False\n        return f(0, self.root)\n\n# Your WordDictionary object will be instantiated and called as such:\n# wordDictionary = WordDictionary()\n# wordDictionary.addWord(\"word\")\n# wordDictionary.search(\"pattern\")\nif __name__ == '__main__':\n    wordDictionary = WordDictionary()\n    wordDictionary.addWord(\"aa\")\n    wordDictionary.addWord(\"aaaa\")\n    print(wordDictionary.search(\".\"))\n    print(wordDictionary.search(\"a\"))\n    print(wordDictionary.search(\"aa\"))\n    print(wordDictionary.search(\"a.\"))\n    print(wordDictionary.search(\"..b\"))","sub_path":"Algorithms/Add and Search Word - Data structure design/Add and Search Word.py","file_name":"Add and Search Word.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"580782253","text":"\"\"\"\nscratch05.ex01.py\nStatistics\n\nCentral tendency: mean, median, quantiles (quartiles, percentiles), mode\nDispersion: variance, standard deviation, range\nCorrelation: covariance, correlation coefficient\n\"\"\"\nfrom collections import Counter\nfrom math import sqrt\nfrom scratch04.ex01 import dot\n\ndef mean(x):\n    \"\"\"\n    Computes and returns the mean of all items in the list x\n    x = [x1,x2,... xn]\n    mean = (x1 + x2 + ... + xn) / n\n    :param x: (1-D) list with n elements\n    :return: mean\n    \"\"\"\n    # sum = 0\n    # for i in x:\n    #     sum += x[i]\n    # avg = sum / len(x)\n    return sum(x) / len(x)\n\ndef median(x):\n    \"\"\"\n    Finds and returns the value in the middle of the sorted list x.\n    If n is odd, returns the middle value;\n    if n is even, returns the mean of the two middle values.\n    :param x: (1-D) list with n elements\n    :return: median\n    \"\"\"\n    # x = x.sort()\n    # if len(x)//2 == 0:  # even\n    #     y = (x[len(x)/2-1]+x[len(x)/2])/2\n    # else:  # odd\n    #     y = x[round(len(x)/2)]\n    # return y\n\n    n = len(x)  # number of items in the list\n    sorted_x = sorted(x)  # sort the data by size (ascending)\n    mid_point = n//2  # middle position (index) of the list\n    if n % 2:  # n is odd\n        median_value = sorted_x[mid_point]\n    else:  # n is even\n        median_value = (sorted_x[mid_point -1] + sorted_x[mid_point]) / 2\n    return median_value\n\ndef quantile(x,p):\n    \"\"\"\n\n    Returns the value at the p-quantile of the list x\n    :param x: (1-D) list with n elements\n    :param p: value between 0 and 1.0\n    :return: the value at that quantile (percentile)\n    \"\"\"\n    n = len(x)  # number of items in the list\n    p_index = int(n * p)  # index for that percentile - truncate the fraction\n    sorted_x = sorted(x)  # list sorted in ascending order\n    return sorted_x[p_index]\n\ndef mode(x):\n    \"\"\"\n    Returns the most frequent value in the list.\n    If there are several modes, returns the list of modes.\n    from collections import Counter\n\n    :param x: (1-D) list with n elements\n    :return: list of modes\n    \"\"\"\n    counts = Counter(x)  # create a Counter object (instance)\n    # print(counts)\n    # print(counts.keys(), counts.values())\n    # Counter.keys(): data values, Counter.values(): frequencies\n    # print(counts.items())  # list of (value, frequency) tuples\n    max_count = max(counts.values())  # maximum frequency\n    return [val for val, cnt in counts.items()\n            if cnt == max_count]\n    # freq = []  # list to store the modes\n    # for val,cnt in counts.items():  # iterate over the Counter object\n    #     if cnt == max_count:  # if the frequency equals the maximum frequency\n    #         freq.append(val)  # store it in the list\n    # return freq\n\ndef data_range(x):\n    \"\"\"\n\n    :param x: (1-D) list with n elements\n    :return: max of the list - min of the list\n    \"\"\"\n\n    # return max(x) - min(x)\n    sorted_x = sorted(x)\n    return sorted_x[len(x)-1] - sorted_x[0]\n\ndef de_mean(x):\n    \"\"\"\n    List of deviations (data - mean)\n\n    :param x: (1-D) list with n elements\n    :return: list of deviations\n    \"\"\"
    mu = mean(x)  # mean\n    return [x_i - mu for x_i in x]\ndef variance(x):\n    \"\"\"\n    ((x1 - mean) ** 2 + (x2 - mean) ** 2 + ... + (xn - mean) ** 2) / (n - 1)\n    :param x: (1-D) list with n elements\n    :return: variance of x\n    \"\"\"\n    # sum = 0\n    # for i in x:\n    #     sum += (i - mean(x)) ** 2\n    # v = sum / (len(x)-1)\n\n    n = len(x)  # number of elements\n    # x_bar = mean(x)  # mean\n    # return sum([(x_i - x_bar) ** 2 for x_i in x]) / (n-1)\n\n    # another way\n    # deviations = de_mean(x)  # list of deviations\n    # return sum([d ** 2 for d in deviations]) / (n-1)\n\n    # using the imported dot() function\n    print('using the dot function')\n    deviations = de_mean(x)\n    return dot(deviations, deviations) / (n-1)\n\n\ndef standard_deviation(x):\n    \"\"\"\n    sqrt(variance)\n    :param x: (1-D) list with n elements\n    :return: standard deviation\n    \"\"\"\n    return sqrt(variance(x))\n\ndef covariance(x,y):\n    \"\"\"\n    Covariance\n    Cov = sum((xi - x_bar)(yi - y_bar)) / (n - 1)\n\n    :param x: (1-D) list with n elements\n    :param y: (1-D) list with n elements\n    :return: covariance\n    \"\"\"\n    # instructor's solution\n    x_bar = mean(x)  # mean of x\n    y_bar = mean(y)  # mean of y\n    x_deviations = [x_i - x_bar for x_i in x]\n    y_deviations = [y_i - y_bar for y_i in y]\n    sum_of_deviations = dot(x_deviations, y_deviations)\n    # sum_of_deviations = 0  # sum((xi - x_bar)(yi - y_bar))\n    # for xd, yd in zip(x_deviations,y_deviations):\n    #     sum_of_deviations += xd * yd\n    return sum_of_deviations / (len(x) - 1)\n\n    # n = len(x)\n    # return dot(de_mean(x),de_mean(y)) / (n - 1)\n\ndef correlation(x,y):\n    \"\"\"\n    Correlation coefficient\n    Corr = cov(x, y) / (SD(x) * SD(y) )\n\n    :param x: (1-D) list with n elements\n    :param y: (1-D) list with n elements\n    :return: correlation coefficient\n    \"\"\"\n    sd_x = standard_deviation(x)\n    sd_y = standard_deviation(y)\n    if sd_x != 0 and sd_y !=0:\n        corr = covariance(x,y) / (sd_x * sd_y)\n    else:\n        corr = 0\n    return corr\n\n\n\nif __name__ == '__main__':\n    data = [2,2,3,3,4,4,4,6,6,6,100]\n    mean_data = mean(data)\n    print(mean_data)\n\n    median_data = median(data)\n    print(median_data)\n\n    quantile_1 = quantile(data,0.25)  # first quartile\n    print(quantile_1)\n\n    quantile_3 = quantile(data, 0.75)  # third quartile\n    print(quantile_3)\n\n    most_frequent = mode(data)\n    print(most_frequent)\n\n    print(variance(data))\n    x =[1,2,3,4,5,6,7,8,9,10]\n    y =[2,4,8,16,32,64,128,256,512,1024]\n    cov = covariance(x,y)\n    print('covariance =', cov)\n    corr = correlation(x,y)\n    print('correlation =', corr)\n\n    x = [-3,-2,-1,0,1,2,3]\n    y = [3,2,1,0,1,2,3]\n    # y = |x|\n    print(correlation(x,y))\n\n\n\n\n\n\n\n\n","sub_path":"scratch05/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":6174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"35443581","text":"# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\n# from pip.req import parse_requirements\nimport re, ast\n\n# get version from __version__ variable in pivot_table/__init__.py\n_version_re = re.compile(r'__version__\\\s+=\\\s+(.*)')\n\nwith open('pivot_table/__init__.py', 'rb') as f:\n\tversion = str(ast.literal_eval(_version_re.search(\n\t\tf.read().decode('utf-8')).group(1)))\n\n# requirements = parse_requirements(\"requirements.txt\", session=\"\")\n\nsetup(\n\tname='pivot_table',\n\tversion=version,\n\tdescription='Pivot Tables using 
PivotTable.js',\n\tauthor='vijaywm',\n\tauthor_email='vijay_wm@yahoo.com',\n\tpackages=find_packages(),\n\tzip_safe=False,\n\tinclude_package_data=True,\n\tinstall_requires=['frappe']\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"18684538","text":"\r\nfrom io_thread import IO_Thread\r\nimport time\r\nfrom datetime import datetime\r\n\r\ntry:\r\n import pigpio \r\n _pigpio_ok=True\r\nexcept ImportError:\r\n _pigpio_ok=False\r\n\r\nclass IO_Thread_Sprinkler(IO_Thread):\r\n def __init__(self,**kwargs):\r\n self._valve_pins=kwargs.get('valve_pins',False)\r\n self._valve_names=kwargs.get('valve_names',False)\r\n self._schedule=[]\r\n self._valve_mode=['AUTO']*len(self._valve_pins)\r\n self._set_default_schedule()\r\n self._valve_states=[0]*len(self._valve_pins)\r\n IO_Thread.__init__(self,**kwargs)\r\n \r\n \r\n #get the parameter name associated with the kth sensor\r\n #k starts at 0\r\n def _get_pname(self,k):\r\n if self._valve_names==False:\r\n return '%d' % (k+1)\r\n else:\r\n return self._valve_names[k]\r\n \r\n def _set_op_desc(self):\r\n for k,valve_pin in enumerate(self._valve_pins):\r\n #print ('IO_Thread_Moist._set_op_desc: k=',k,'det_pin=',det_pin)\r\n pname=self._get_pname(k)\r\n self._op_desc[pname]={\r\n 'pdesc':'Sprinkler',\r\n 'ptype':'valve_pos',\r\n 'pdatatype':'float',\r\n 'pmin':-10,\r\n 'pmax':110,\r\n 'punits':'%' }\r\n \r\n def _set_default_schedule(self):\r\n self._add_to_schedule([0,20,0,20,20])\r\n self._add_to_schedule([1,19,15,19,35])\r\n self._add_to_schedule([0,13,30,13,40])\r\n self._add_to_schedule([2,19,40,19,56])\r\n #self._add_to_schedule([0,21,30,21,32])\r\n print(\"Sprinkler Schedule: \",self._schedule)\r\n \r\n #program_peg is [valve_index,start_h,start_m,stop_h,stop_m]\r\n #remember valve_index starts at 0\r\n def _add_to_schedule(self,program_peg):\r\n self._schedule.append(program_peg)\r\n \r\n def _clear_valve_states(self):\r\n for k,valve_pin in enumerate(self._valve_pins):\r\n self._valve_states[k]=0 \r\n \r\n def _check_pegs(self):\r\n t_now=datetime.now()\r\n self._clear_valve_states()\r\n for peg in self._schedule:\r\n valve_num=peg[0]\r\n peg_start=t_now.replace(hour=peg[1],minute=peg[2],second=0,microsecond=0)\r\n peg_stop=t_now.replace(hour=peg[3],minute=peg[4],second=0,microsecond=0)\r\n if (t_now>peg_start) and (t_now \r\n if cmd=='SPRINK:MODE':\r\n d=data.split(',')\r\n valve_number=int(d[0].strip())\r\n valve_mode=d[1].strip()\r\n self._valve_mode[valve_number]=valve_mode\r\n self._heartbeat(datetime.now()) #force an update\r\n \r\n def _shutdown(self):\r\n if not self._sim_hw:\r\n self._turn_off()\r\n IO_Thread._shutdown(self)\r\n#END class IO_Thread_Moist------------------------------------------------ \r\n","sub_path":"io_sprinkler.py","file_name":"io_sprinkler.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"297852745","text":"# Imports GLFW, OpenGL, and numpy\r\nimport glfw\r\nfrom OpenGL.GL import *\r\nimport OpenGL.GL.shaders\r\nimport numpy\r\nimport pyrr\r\n\r\n# List of cords for the vertices for drawing a triangle and the vertex colors for is as well (order of cords is \"x, y, z, r, g, b\")\r\ntri_verts = [-0.5, -0.5, 0.0, 1.0, 1.0, 0.0,\r\n 0.5, -0.5, 0.0, 0.0, 1.0, 1.0,\r\n 0.0, 0.5, 0.0, 1.0, 0.0, 1.0]\r\n\r\n\r\n# Converts list to numpy array\r\ntri_verts = numpy.array(tri_verts, 
dtype = numpy.float32)\r\n\r\n# Imports vertex shader\r\nimport_vs = open('shaders/vertex_shader.txt', 'r')\r\nvert_shader = import_vs.read()\r\nimport_vs.close()\r\n\r\n# Imports fragment shader\r\nimport_fs = open('shaders/fragment_shader.txt', 'r')\r\nfrag_shader = import_fs.read()\r\nimport_fs.close()\r\n\r\n# Main window\r\ndef main():\r\n    #////////////////////////////////////////////#\r\n    # Some variables to change the view and such #\r\n    #////////////////////////////////////////////#\r\n\r\n    FOV = 35 # Changes the field of view\r\n    rotation_speed = 1 # Changes speed of the triangles rotation\r\n    display_res = [1920, 1080] # Window resolution\r\n\r\n    # Initializing glfw\r\n    if not glfw.init():\r\n        print('Error when trying to initialize GLFW')\r\n        return\r\n    print('Initializing GLFW...')\r\n\r\n    # Makes window\r\n    window = glfw.create_window(display_res[0], display_res[1], \"OpenGL Test\", None, None)\r\n    print('Creating window...')\r\n\r\n    # Closes program when window is closed\r\n    if not window:\r\n        glfw.terminate()\r\n        return\r\n\r\n    # Start of OpenGL code\r\n    glfw.make_context_current(window)\r\n\r\n    # Compiles the vertex and fragment shader together\r\n    shader = OpenGL.GL.shaders.compileProgram(OpenGL.GL.shaders.compileShader(vert_shader, GL_VERTEX_SHADER),\r\n                                              OpenGL.GL.shaders.compileShader(frag_shader, GL_FRAGMENT_SHADER))\r\n    print('Compiling shaders...')\r\n\r\n    # Sets background color\r\n    glClearColor(1, 1, 1, 1)\r\n\r\n    # Sends vertex data to VRAM\r\n    VBO = glGenBuffers(1)\r\n    glBindBuffer(GL_ARRAY_BUFFER, VBO)\r\n    glBufferData(GL_ARRAY_BUFFER, 72, tri_verts, GL_STATIC_DRAW)\r\n\r\n    # Creates triangle\r\n    position = glGetAttribLocation(shader, 'position')\r\n    glVertexAttribPointer(position, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(0))\r\n    glEnableVertexAttribArray(position)\r\n\r\n    # Colors the triangle\r\n    color = glGetAttribLocation(shader, 'colors')\r\n    glVertexAttribPointer(color, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(12))\r\n    glEnableVertexAttribArray(color)\r\n\r\n    # Enables shaders\r\n    glUseProgram(shader)\r\n\r\n    # Perspective matrix\r\n    view_mat = pyrr.matrix44.create_from_translation(pyrr.Vector3([0, 0., -3.0]))\r\n    projection_mat = pyrr.matrix44.create_perspective_projection_matrix(FOV, display_res[0] / display_res[1], 0.1, 50)\r\n    model_mat = pyrr.matrix44.create_from_translation(pyrr.Vector3([0, 0, 0]))\r\n    view_location = glGetUniformLocation(shader, 'view')\r\n    proj_location = glGetUniformLocation(shader, 'projection')\r\n    model_location = glGetUniformLocation(shader, 'model')\r\n\r\n    glUniformMatrix4fv(view_location, 1, GL_FALSE, view_mat)\r\n    glUniformMatrix4fv(proj_location, 1, GL_FALSE, projection_mat)\r\n    glUniformMatrix4fv(model_location, 1, GL_FALSE, model_mat)\r\n\r\n    print('Program started successfully!')\r\n\r\n    # Main loop\r\n    while not glfw.window_should_close(window):\r\n        # Put opengl rendering code here...\r\n        glClear(GL_COLOR_BUFFER_BIT)\r\n\r\n        # Rotates the triangle\r\n        transform = glGetUniformLocation(shader, 'transform')\r\n        rotation_x = pyrr.Matrix44.from_x_rotation(rotation_speed * glfw.get_time())\r\n        rotation_y = pyrr.Matrix44.from_y_rotation(rotation_speed * glfw.get_time())\r\n        rotation_z = pyrr.Matrix44.from_z_rotation(rotation_speed * glfw.get_time())\r\n\r\n        glUniformMatrix4fv(transform, 1, GL_FALSE, rotation_x * rotation_y * rotation_z)\r\n\r\n        # Draws the triangle\r\n        glDrawArrays(GL_TRIANGLES, 0, 3)\r\n\r\n        # Swaps front and back buffers\r\n        glfw.swap_buffers(window)\r\n        # Detects any events that happen in GLFW (i.e the 
window closing)\r\n glfw.poll_events()\r\n\r\n print('Closing program...')\r\n glfw.terminate()\r\n\r\nmain()\r\n\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"128301072","text":"from system.link_quality import Signal\nimport os\nimport pyudev\nimport re\nimport subprocess\n\n\nclass System():\n def __init__(self):\n self.context = pyudev.Context()\n self.signal = Signal()\n self.signal.start()\n\n def getSignal(self):\n return self.signal.strength\n\n def getTemperature(self):\n f = os.popen('vcgencmd measure_temp | grep -ohP \"=[0-9]*\" | cut -c 2-')\n return f.read()\n\n def getWirelessAdapterInfo(self):\n for device in self.context.list_devices(subsystem='net', DEVTYPE='wlan'):\n external_wlan_model = device.get('ID_MODEL_FROM_DATABASE')\n if external_wlan_model is not None:\n return external_wlan_model\n return None\n\n def getCameraInfo(self):\n try:\n output = subprocess.check_output(\"v4l2-ctl --list-devices 2>/dev/null\", shell=True)\n except subprocess.CalledProcessError as e:\n output = e.output\n\n expr = r\"(?P.*) \\(.*\\):\\n\\t(?P.*)\"\n\n cameras = []\n\n for camera in re.finditer(expr, output.decode('utf-8')):\n cameras.append({\n 'model': camera.group('model'),\n 'device': camera.group('device')\n })\n\n return cameras\n\n def shutdown(self):\n subprocess.run(['poweroff'])\n\n def reboot(self):\n subprocess.run(['reboot'])","sub_path":"server/system/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"583681677","text":"from kivy.uix.widget import Widget\nfrom kivy.graphics import Rectangle,Ellipse,Line,Color\n\nclass GridException(Exception):\n def __init__(self,value):\n self.value=value\n def __str__(self):\n return repr(self.value)\n\nclass GridContainer (Widget):\n def __init__(self, cellSize, posBeg, posEnd, **kwargs):\n super(GridContainer, self).__init__(**kwargs)\n self.container = []\n self.cellSize = 50\n self.N = 100\n \n\n self.N = max(int( (posEnd[0]-posBeg[0])/cellSize ) + 1,\n int( (posEnd[1]-posBeg[1])/cellSize ) + 1 )\n\n self.pos=posBeg\n\n for i in range(0,self.N):\n line = []\n for j in range(0,self.N):\n line.append(None)\n self.container.append(line)\n self.cellSize = cellSize\n\n self.gridLines=[]\n with self.canvas:\n Color(0,0,0,0.1)\n for i in range(0,self.N):\n self.gridLines.append(Line(points = [ self.pos[0]+i*self.cellSize, self.pos[1], self.pos[0]+i*self.cellSize, self.pos[1]+self.N*self.cellSize]))\n for i in range(0,self.N):\n self.gridLines.append(Line(points = [ self.pos[0], self.pos[1]+i*self.cellSize, self.pos[0]+self.N*self.cellSize, self.pos[1]+i*self.cellSize]))\n \n\n\n def getIndex(self,pos):\n posRel = (pos[0]-self.pos[0], pos[1]-self.pos[1])\n ix = int(posRel[0]/self.cellSize)\n iy = int(posRel[1]/self.cellSize)\n if ix<0 or ix>=self.N or iy<0 or iy>=self.N:\n raise GridException(\"Out of the grid\")\n return ix,iy\n\n def addObject(self, pos, object):\n iPos = self.getIndex(pos)\n object.gridPos=iPos\n self.container[iPos[0]][iPos[1]] = object\n return object\n\n def deleteObject(self, gridPos):\n self.container[gridPos[0]][gridPos[1]].gridPos=None\n self.container[gridPos[0]][gridPos[1]] = None\n\n def getObject(self, pos):\n iPos = self.getIndex(pos)\n return self.container[iPos[0]][iPos[1]]\n\n def getObjectByIdx(self, idx):\n return 
self.container[idx[0]][idx[1]]\n\n def translate(self, dx, dy):\n self.pos= (self.pos[0]+dx, self.pos[1]+dy)\n for line in self.gridLines:\n line.points = [line.points[0]+dx, line.points[1]+dy, line.points[2]+dx,line.points[3]+dy]\n\n\n\n","sub_path":"gridContainer.py","file_name":"gridContainer.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"284291884","text":"import os\nimport socket\nimport unittest\n\nfrom bugsnag.configuration import Configuration\nfrom bugsnag.middleware import DefaultMiddleware\nfrom bugsnag.sessiontracker import SessionMiddleware\n\n\nclass TestConfiguration(unittest.TestCase):\n\n def test_get_endpoint_use_ssl(self):\n c = Configuration()\n c.use_ssl = True\n self.assertEqual(c.get_endpoint(), \"https://notify.bugsnag.com\")\n\n def test_get_endpoint_no_use_ssl(self):\n c = Configuration()\n c.use_ssl = False\n self.assertEqual(c.get_endpoint(), \"http://notify.bugsnag.com\")\n\n def test_custom_get_endpoint_default_ssl(self):\n c = Configuration()\n c.endpoint = \"localhost:1234\"\n self.assertEqual(c.get_endpoint(), \"https://localhost:1234\")\n\n def test_custom_get_endpoint_use_ssl(self):\n c = Configuration()\n c.use_ssl = True\n c.endpoint = \"localhost:1234\"\n self.assertEqual(c.get_endpoint(), \"https://localhost:1234\")\n\n def test_custom_get_endpoint_no_use_ssl(self):\n c = Configuration()\n c.use_ssl = False\n c.endpoint = \"localhost:1234\"\n self.assertEqual(c.get_endpoint(), \"http://localhost:1234\")\n\n def test_full_custom_get_endpoint(self):\n c = Configuration()\n c.endpoint = \"https://localhost:1234\"\n self.assertEqual(c.get_endpoint(), \"https://localhost:1234\")\n\n def test_full_custom_get_endpoint_use_ssl(self):\n c = Configuration()\n c.use_ssl = True\n c.endpoint = \"https://localhost:1234\"\n self.assertEqual(c.get_endpoint(), \"https://localhost:1234\")\n\n def test_full_custom_get_endpoint_no_use_ssl(self):\n c = Configuration()\n c.use_ssl = False\n c.endpoint = \"https://localhost:1234\"\n self.assertEqual(c.get_endpoint(), \"http://localhost:1234\")\n\n def test_reads_api_key_from_environ(self):\n os.environ['BUGSNAG_API_KEY'] = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'\n c = Configuration()\n self.assertEqual(c.api_key, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')\n self.assertEqual(c.project_root, os.getcwd())\n\n def test_should_notify(self):\n # Test custom release_stage\n c = Configuration()\n c.release_stage = \"anything\"\n self.assertTrue(c.should_notify())\n\n # Test release_stage in notify_release_stages\n c = Configuration()\n c.notify_release_stages = [\"production\"]\n c.release_stage = \"development\"\n self.assertFalse(c.should_notify())\n\n # Test release_stage in notify_release_stages\n c = Configuration()\n c.notify_release_stages = [\"custom\"]\n c.release_stage = \"custom\"\n self.assertTrue(c.should_notify())\n\n def test_ignore_classes(self):\n # Test ignoring a class works\n c = Configuration()\n c.ignore_classes.append(\"SystemError\")\n self.assertTrue(c.should_ignore(SystemError(\"Example\")))\n\n c = Configuration()\n c.ignore_classes.append(\"SystemError\")\n self.assertFalse(c.should_ignore(Exception(\"Example\")))\n\n def test_hostname(self):\n c = Configuration()\n self.assertEqual(c.hostname, socket.gethostname())\n\n os.environ[\"DYNO\"] = \"YES\"\n c = Configuration()\n self.assertEqual(c.hostname, None)\n\n def test_session_tracking_defaults(self):\n c = Configuration()\n 
self.assertEqual(c.auto_capture_sessions, False)\n self.assertEqual(c.session_endpoint, \"https://sessions.bugsnag.com\")\n\n def test_default_middleware_location(self):\n c = Configuration()\n self.assertEqual(c.internal_middleware.stack,\n [DefaultMiddleware, SessionMiddleware])\n self.assertEqual(len(c.middleware.stack), 0)\n","sub_path":"tests/test_configuration.py","file_name":"test_configuration.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"71875986","text":"import datetime\nimport json\nimport paho.mqtt.client as mqtt\nimport simpy.rt\n\nimport camera as c\n\n\ndef publish_update(_client, _topic, _info):\n while True:\n sim_time = starting_time + datetime.timedelta(seconds=env.now)\n _client.publish(sensor.base_topic + _topic, json.dumps(_info['return'](sim_time)))\n print(\"{} - Publish on topic `{}`.\".format(sim_time.replace(microsecond=0), _topic))\n\n yield env.timeout(_info['interval'])\n\n\ndef publish_frame(_client, _topic, _info):\n while True:\n sim_time = starting_time + datetime.timedelta(seconds=env.now)\n interval = sensor.get_rate()[\"interval\"]\n if interval == 0:\n yield env.process(stop())\n\n _client.publish(sensor.base_topic + _topic, json.dumps(_info['return'](sim_time)))\n print(\"{} - Publish on topic `{}`.\".format(sim_time.replace(microsecond=0), _topic))\n\n yield env.timeout(interval)\n\n\ndef stop():\n print(\"Camera turned OFF\")\n while sensor.get_rate()[\"interval\"] == 0:\n pass\n print(\"Turning ON the camera\")\n yield env.timeout(1)\n\n\ndef on_message(client, userdata, msg):\n print(\"Received {} {}\".format(msg.topic, msg.payload.decode(\"utf-8\", \"ignore\")))\n sensor.store_update(dict(json.loads(msg.payload.decode(\"utf-8\")))[\"status\"])\n\n\ndef main():\n client = mqtt.Client(sensor.client_id)\n client.on_message = on_message\n\n print(\"connecting to the broker\", sensor.broker)\n client.connect(sensor.broker, keepalive=600)\n\n for topic in sensor.actions.keys():\n client.subscribe(sensor.base_topic + 'action/' + topic)\n # add subscriber log\n\n for topic, info in sensor.topic_passive.items():\n env.process(publish_update(client, topic, info))\n\n env.process(publish_frame(client, 'stream', sensor.topic_active['stream']))\n\n # subscriber remains active\n client.loop_start()\n\n # start the simulation\n env.run(until=float(c.SIM_DURATION))\n\n\nif __name__ == \"__main__\":\n print(\"+++ SUBSCRIBER +++\")\n starting_time = datetime.datetime.now()\n print(\"Simulation started at {}\".format(starting_time))\n env = simpy.rt.RealtimeEnvironment(factor=c.SIM_FACTOR, strict=False)\n\n sensor = c.Camera()\n\n c.get_info()\n main()\n","sub_path":"dataset/camera/camera_device.py","file_name":"camera_device.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"611695480","text":"\"\"\"\nThis file contains tests for functions in pLDA.py.\nRun the tests using py.test.\n\"\"\"\n\nimport pLDA_Python.pLDA_train as pLDA, pLDA_Python.pLDA_predict as pLDA_predict\nimport numpy, pytest\n\n\ndirectory = \"/home/tab43/Documents/Projects/pLDA_Python/test/\"\n\n\ndef test_constructor():\n \"\"\"\n Test whether the constructor functionality works correctly.\n \"\"\"\n W = [[1,2,3],[2,4]]\n \n # No priors on alpha, beta \n alpha = [1,1]\n beta = [1,1,1,1]\n lda = pLDA.prior_LDA(W,alpha,beta)\n assert numpy.array_equal(lda.vocabulary,[1,2,3,4])\n assert lda.M == 2 
and lda.V == 4\n assert set(lda.word_id_map.items()) == set({1:0,2:1,3:2,4:3}.items())\n assert lda.prior_alpha == False and lda.prior_beta == False\n assert numpy.array_equal(lda.alpha,[[1,1],[1,1]])\n assert numpy.array_equal(lda.beta,[[1,1,1,1],[1,1,1,1]])\n \n # Prior on alpha \n alpha = [[1,1],[1,1]]\n lda = pLDA.prior_LDA(W,alpha,beta)\n assert lda.prior_alpha == True and lda.prior_beta == False\n assert numpy.array_equal(lda.beta,[[1,1,1,1],[1,1,1,1]])\n \n # Prior on both \n alpha = [[1,1],[1,1]]\n beta = [[1,1,1,1],[1,1,1,1]]\n lda = pLDA.prior_LDA(W,alpha,beta)\n assert lda.prior_alpha == True and lda.prior_beta == True\n \n # Prior on beta \n alpha = [1,1]\n lda = pLDA.prior_LDA(W,alpha,beta)\n assert lda.prior_alpha == False and lda.prior_beta == True\n assert numpy.array_equal(lda.alpha,[[1,1],[1,1]])\n \n return\n\n\ndef test_sample_from_dist():\n \"\"\"\n We test here whether the function that samples a value from the given\n probabilities raises the correct exception.\n \"\"\"\n values = ['1','2','3']\n probabilities = [0.5,0.5,0]\n for i in range(0,20):\n v = pLDA.sample_from_dist(values,probabilities)\n assert v in ['1','2']\n \n probabilities = [0.0,0.0,0.0]\n with pytest.raises(Exception):\n v = pLDA.sample_from_dist(values,probabilities)\n\n return\n \n \ndef test_initial_topic_probabilities():\n # No prior\n alpha = [1,2]\n beta = [3,4]\n W = [[1,2]]\n lda = pLDA.prior_LDA(W,alpha,beta) #K=2\n \n probabilities = lda.initial_topic_probabilities(0,0,1)\n assert numpy.array_equal(probabilities,[0.5,0.5])\n \n # Prior over alpha\n alpha = [[1,3]]\n lda = pLDA.prior_LDA(W,alpha,beta)\n \n probabilities = lda.initial_topic_probabilities(0,0,1)\n assert numpy.array_equal(probabilities,[0.25,0.75])\n \n # Prior over beta\n beta = [[3,2],[1,2]]\n alpha = [1,2]\n lda = pLDA.prior_LDA(W,alpha,beta)\n \n probabilities1 = lda.initial_topic_probabilities(0,0,1)\n assert numpy.array_equal(probabilities1,[0.75,0.25])\n probabilities2 = lda.initial_topic_probabilities(0,1,2)\n assert numpy.array_equal(probabilities2,[0.5,0.5])\n \n # Prior on both - t1 can choose 1,2 equally likely, t2 can choose only t2. prob of t2 = 0.75, prob of t1 = 0.25\n alpha = [[1,3]]\n beta = [[1,1],[0,1]]\n lda = pLDA.prior_LDA(W,alpha,beta)\n \n probabilities1 = lda.initial_topic_probabilities(0,0,1)\n assert numpy.array_equal(probabilities1,[1,0])\n probabilities2 = lda.initial_topic_probabilities(0,1,2)\n assert numpy.array_equal(probabilities2,[0.25,0.75])\n\n return\n \n\ndef test_initialisation():\n \"\"\"\n Here we test whether the initialisation of pLDA works correctly.\n We consider an example where each word can only be generated from one \n topic, which makes it a deterministic process.\n There are three topics, and a vocabulary size of 4. 
The ith unique word \n maps to the ith topic, except the fourth which maps to the 3rd topic as well.\n \"\"\"\n alpha = [1,1,1] # -> [[1,1,1],[1,1,1]]\n beta = [[1,0,0,0],[0,1,0,0],[0,0,1,1]]\n W = [\n [2,2,1,0],\n [3,2,0]\n ]\n Z = [\n [2,2,1,0],\n [2,2,0]\n ]\n cdt = [[1,1,2],[1,0,2]]\n ctv = [[2,0,0,0],[0,1,0,0],[0,0,3,1]]\n ct_sum = [2,1,4]\n beta_sum = [1,1,2] \n \n theta = [[2.0/7.0,2.0/7.0,3.0/7.0],[1.0/3.0,1.0/6.0,1.0/2.0]] # cdt+alpha, normalised row-wise\n phi = [[1,0,0,0],[0,1,0,0],[0,0,2.0/3.0,1.0/3.0]] # ctv+beta, normalised row-wise\n \n lda = pLDA.prior_LDA(W,alpha,beta)\n lda.initialise_topics()\n \n assert numpy.array_equal(lda.alpha,[alpha,alpha])\n assert numpy.array_equal(lda.alpha_sum,[3,3])\n assert numpy.array_equal(lda.beta,beta)\n assert numpy.array_equal(lda.beta_sum,[1,1,2])\n assert numpy.array_equal(lda.Z,Z)\n assert numpy.array_equal(lda.cdt,cdt)\n assert numpy.array_equal(lda.ctv,ctv)\n assert numpy.array_equal(lda.ct_sum,ct_sum)\n assert numpy.array_equal(lda.beta_sum,beta_sum)\n assert numpy.array_equal(lda.theta,theta)\n assert numpy.array_equal(lda.phi,phi)\n \n \ndef test_gibbs_train():\n \"\"\"\n Test a single iteration of the collapsed Gibbs sampler, using the toy\n example above, meaning we expect none of the topic assignments to change\n (mainly checks for functionality of code, not correctness).\n \"\"\" \n alpha = [1,1,1] # -> [[1,1,1],[1,1,1]]\n beta = [[1,0,0,0],[0,1,0,0],[0,0,1,1]]\n W = [\n [0,1,2,2],\n [3,2,0]\n ]\n Z = [\n [0,1,2,2],\n [2,2,0]\n ]\n cdt = [[1,1,2],[1,0,2]]\n ctv = [[2,0,0,0],[0,1,0,0],[0,0,3,1]]\n ct_sum = [2,1,4]\n beta_sum = [1,1,2] \n theta = [[2.0/7.0,2.0/7.0,3.0/7.0],[1.0/3.0,1.0/6.0,1.0/2.0]] # cdt+alpha, normalised row-wise\n phi = [[1,0,0,0],[0,1,0,0],[0,0,2.0/3.0,1.0/3.0]] # ctv+beta, normalised row-wise\n \n lda = pLDA.prior_LDA(W,alpha,beta)\n lda.initialise_topics()\n lda.run_gibbs_train(iterations=1,results=directory+\"results/\")\n \n assert numpy.array_equal(lda.alpha,[alpha,alpha])\n assert numpy.array_equal(lda.beta,beta)\n assert numpy.array_equal(lda.Z,Z)\n assert numpy.array_equal(lda.cdt,cdt)\n assert numpy.array_equal(lda.ctv,ctv)\n assert numpy.array_equal(lda.ct_sum,ct_sum)\n assert numpy.array_equal(lda.beta_sum,beta_sum)\n assert numpy.array_equal(lda.theta,theta) #topic assignments don't change to distributions don't change\n assert numpy.array_equal(lda.phi,phi)\n \n \ndef test_gibbs_predict():\n \"\"\"\n Test whether the run_gibbs_predict function works correctly, and whether\n the variables afterwards are still the same as before (in particular, W,\n theta, phi).\n We test it on a model trained on the documents of \n \"\"\"\n alpha = [1,1,1] # -> [[1,1,1],[1,1,1]]\n beta = [[1,0,0,0],[0,1,0,0],[0,0,1,1]]\n W = [\n [0,1,2,2],\n [3,2,0]\n ]\n Z = [\n [0,1,2,2],\n [2,2,0]\n ]\n cdt = [[1,1,2],[1,0,2]]\n ctv = [[2,0,0,0],[0,1,0,0],[0,0,3,1]]\n ct_sum = [2,1,4]\n beta_sum = [1,1,2] \n theta = [[2.0/7.0,2.0/7.0,3.0/7.0],[1.0/3.0,1.0/6.0,1.0/2.0]] # cdt+alpha, normalised row-wise\n phi = [[1,0,0,0],[0,1,0,0],[0,0,2.0/3.0,1.0/3.0]] # ctv+beta, normalised row-wise\n \n lda = pLDA.prior_LDA(W,alpha,beta)\n lda.initialise_topics()\n lda.run_gibbs_train(iterations=1,results=directory+\"results/\")\n \n last_ctv = lda.ctv \n word_id_map = lda.word_id_map\n \n # New documents to classify: \n new_docs = [[0],[1,2],[2,3]]\n \n lda_pred = pLDA_predict.prior_LDA_predict(new_docs,last_ctv,alpha,beta,word_id_map)\n lda_pred.run_gibbs_predict(iterations=1,results=directory+\"predictions/\")\n \n theta_exp = 
[[0.5,0.25,0.25],[0.2,0.4,0.4],[0.2,0.2,0.6]] #non-zero values due to prior alpha\n Z_exp = [[0],[1,2],[2,2]]\n new_ctv = [[3,0,0,0],[0,2,0,0],[0,0,5,2]]\n new_ct_sum = [3,2,7]\n \n assert numpy.array_equal(lda_pred.lda.theta,theta_exp)\n assert numpy.array_equal(lda_pred.lda.Z,Z_exp)\n assert numpy.array_equal(lda_pred.lda.ctv,new_ctv)\n assert numpy.array_equal(lda_pred.lda.ct_sum,new_ct_sum)\n \n \"\"\"\n When predicting a new document with an unseen word, it should raise an \n UnseenWordException.\n \"\"\"\n unseen_word_doc = [[4]]\n with pytest.raises(pLDA.UnseenWordException):\n lda_pred = pLDA_predict.prior_LDA_predict(unseen_word_doc,last_ctv,alpha,beta,word_id_map)\n lda_pred.run_gibbs_predict(iterations=1,results=directory+\"predictions/\")\n \n\n\ndef test_impossible_prior():\n \"\"\"\n Here we test whether we get an Exception if we see a word in a document\n that could not possibly be generated from the topics given, given that\n our priors over alpha and beta make that impossible.\n We also test getting an exception for only prior knowledge of alpha,\n and only of beta.\n \n We have:\n 2 topics\n 1 document with 3 words, ['word1','word2','word1']\n Vocabulary: ['word1','word2']\n Prior alpha: [[1,0]] (topic 1 can be selected, topic 2 can't)\n Prior beta: [[1,0],[0,1]] (topic 1 can select 'word1', topic 2 can select 'word2')\n \n So if we have both priors, word1 can be chosen from topic 1, but word2 \n cannot be chosen. So we expect an exception.\n \"\"\"\n W = [['word1','word2','word1']]\n alpha = [[1,0]]\n beta = [[1,0],[0,1]]\n \n lda = pLDA.prior_LDA(W,alpha,beta)\n with pytest.raises(pLDA.ImpossibleWordException):\n lda.initialise_topics()\n \n # Now a simpler example with just an alpha prior, and then just a beta prior\n alpha = [[0,0]] # No topic can be selected for the doc\n beta = [1,1]\n \n lda = pLDA.prior_LDA(W,alpha,beta)\n with pytest.raises(pLDA.ImpossibleWordException):\n lda.initialise_topics()\n \n alpha = [1,1]\n beta = [[1,0],[1,0]] # word2 cannot be selected\n \n lda = pLDA.prior_LDA(W,alpha,beta)\n with pytest.raises(pLDA.ImpossibleWordException):\n lda.initialise_topics()\n \n return\n \n \ndef test_impossible_prediction(): \n \"\"\" \n We test whether we get an ImpossibleWordException when predicting a new \n document containing a word that could not be generated from our model.\n \n We have a similar setup to , but now our training\n data is possible, and we place a different prior alpha over our document\n to predict, s.t. 
it is impossible to generate.\n \n \"\"\"\n W = [['word1','word2','word1']]\n alpha = [[1,1]]\n beta = [[1,0],[0,1]]\n lda = pLDA.prior_LDA(W,alpha,beta)\n lda.initialise_topics()\n lda.run_gibbs_train(iterations=1,results=directory+\"results/\")\n \n word_id_map = lda.word_id_map\n last_ctv = lda.ctv\n new_doc = [['word1','word2']]\n new_alpha = [[1,0]] #topic 2 can't be selected, so we cannot generate 'word2'\n with pytest.raises(pLDA.ImpossibleWordException):\n lda_pred = pLDA_predict.prior_LDA_predict(new_doc,last_ctv,new_alpha,beta,word_id_map)\n lda_pred.run_gibbs_predict(iterations=1,results=directory+\"predictions/\")\n \n return\n \n \n ","sub_path":"test/test_pLDA.py","file_name":"test_pLDA.py","file_ext":"py","file_size_in_byte":10750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"8178071","text":"#!/usr/bin/env python3\n\"\"\"Find the town judge: LeetCode contest challenge.\nhttps://leetcode.com/contest/weekly-contest-125/problems/find-the-town-judge/\n\nAlvaro Leal , 2019\n\"\"\"\nimport pytest\nfrom main import find_town_judge\n\n\n\n@pytest.mark.parametrize(\"num, data, expected_output\", [\n (2, [[1, 2]], 2),\n (3, [[1, 3], [2, 3]], 3),\n (3, [[1, 3], [2, 3], [3, 1]], -1),\n (3, [[1, 2], [2, 3]], -1),\n (4, [[1, 3], [1, 4], [2, 3], [2, 4], [4, 3]], 3),\n (1, [], 1)\n])\ndef test_find_town_judge(num, data, expected_output):\n assert find_town_judge(num, data) == expected_output\n\n\n\n\n\n# Input: N = 2, trust = [[1,2]]\n# Output: 2\n# Example 2:\n\n# Input: N = 3, trust = [[1,3],[2,3]]\n# Output: 3\n# Example 3:\n\n# Input: N = 3, trust = [[1,3],[2,3],[3,1]]\n# Output: -1\n# Example 4:\n\n# Input: N = 3, trust = [[1,2],[2,3]]\n# Output: -1\n# Example 5:\n\n# Input: N = 4, trust = [[1,3],[1,4],[2,3],[2,4],[4,3]]\n# Output: 3","sub_path":"contests/virtual/125/1.find_the_town_judge/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"13062069","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom datetime import datetime\n\n\ndef index(request):\n return render(request, 'sessionwords/index.html')\n\ndef submit(request):\n if request.method == 'POST':\n date = datetime.now().strftime('%-I:%-M:%S%p, %b %-d %Y')\n content = {\n 'word' : request.POST['word'],\n 'color' : request.POST['color'],\n 'big' : request.POST['big'],\n 'added' : str(date),\n }\n if not 'list' in request.session:\n request.session['list'] = []\n saved_list = request.session['list']\n saved_list.append(content)\n request.session['list'] = saved_list\n print (request.session['list'])\n return redirect('/')\n\ndef clear(request):\n request.session.clear()\n return('/')","sub_path":"Python3.6/Django/5SessionWords/SessionWords/apps/sessionwords/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"417018529","text":"from django.shortcuts import render\nfrom . 
import models\nimport tushare as ts\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\npd.set_option('max_colwidth', 20000)\n\ndef regist(request):\n pass\n\ndef login(request):\n pass\n\ndef newspage(request):\n info = ts.get_latest_news(top=2, show_content=True)\n news = models.News()\n news.title = info.title[0].__str__()\n news.content = info.content[0].__str__()\n news.save()\n news = models.News()\n news.title = info.title[1].__str__()\n news.content = info.content[1].__str__()\n news.save()\n news = models.News.objects.all()\n return render(request, 'news.html', {'news': news})\n\ndef shownews(request, news_id):\n news = models.News.objects.get(pk=news_id)\n return render(request, 'newsdetail.html', {'news': news})\n\ndef recommend(request):\n if not models.RecommendFund.objects.all():\n r = requests.get('http://fund.eastmoney.com/trade/default.html')\n encode_content = r.content.decode('gb2312')\n soup = BeautifulSoup(encode_content, 'lxml')\n name = soup.find_all('td', 'fname')\n pattern1 = re.compile(\"(\\d\\d\\d\\d\\d\\d)\")\n code = re.findall(pattern1, encode_content)\n rate = []\n for item in code[0:25]:\n r = requests.get('http://fund.eastmoney.com/pingzhongdata/' + item + '.js')\n pattern3 = re.compile('var syl_1n=\"(.*?)\"')\n tmp = re.findall(pattern3, r.text)\n #tmp[0] += '%'\n rate.append(tmp[0])\n for i in range(0, 25):\n recF = models.RecommendFund()\n recF.code = code[i]\n recF.name = name[i].string\n recF.annualrate = rate[i]\n recF.save()\n recF = models.RecommendFund.objects.all()\n return render(request, 'recommend.html', {'recF': recF})\n\ndef tutorial(request):\n pass\n\ndef showstock(request):\n pass\n\ndef showfund(request, fund_code):\n fund_code = str(fund_code)\n while len(fund_code) < 6:\n fund_code = '0' + fund_code\n r = requests.get('http://fund.eastmoney.com/pingzhongdata/' + fund_code + '.js')\n pattern0 = re.compile('var fS_name = \"(.*?)\"')\n name = re.findall(pattern0, r.text)\n print(name, fund_code)\n pattern1 = re.compile('var syl_1n=\"(.*?)\"')\n oneyear = re.findall(pattern1, r.text)\n pattern2 = re.compile('var syl_6y=\"(.*?)\"')\n sixmonth = re.findall(pattern2, r.text)\n pattern3 = re.compile('var syl_3y=\"(.*?)\"')\n threemonth = re.findall(pattern3, r.text)\n pattern4 = re.compile('var syl_1y=\"(.*?)\"')\n onemonth = re.findall(pattern4, r.text)\n pattern5 = re.compile('\"y\":(.*?),\"equityReturn\"')\n price = re.findall(pattern5, r.text)\n pattern6 = re.compile('\"equityReturn\":(.*?),\"unitMoney\"')\n rate = re.findall(pattern6, r.text)\n fund = models.Fund()\n fund.code = fund_code\n fund.name = name[0]\n fund.annualrate = oneyear[0]\n fund.sixmrate = sixmonth[0]\n fund.threemrate = threemonth[0]\n fund.onemrate = onemonth[0]\n fund.price = price[-1]\n fund.currentrate = rate[-1]\n fund.save()\n return render(request, 'funddetail.html', {'fund': fund})\n\n\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"391103147","text":"# -*- mode: python ; coding: utf-8 -*-\n\nblock_cipher = None\n\nSETUP_DIR = 'D:\\\\doc\\\\pd_code\\\\AutoTest\\\\test\\\\shujia\\\\'\n\n\na = 
Analysis(['main.py',\n'testcase\\\\test_analy_page.py',\n'testcase\\\\test_label_page.py',\n'testcase\\\\test_label_senior_page.py',\n'testcase\\\\test_list_page.py',\n'testcase\\\\test_zf_calculate_page.py',\n'testcase\\\\test_sql_search_page.py',\n'testcase\\\\test_policy_create_page.py',\n'pages\\\\analyPage.py',\n'pages\\\\basePage.py',\n'pages\\\\indexPage.py',\n'pages\\\\labelPage.py',\n'pages\\\\listPage.py',\n'pages\\\\loginPage.py',\n'pages\\\\zfCalculatePage.py',\n'pages\\\\sqlSearchPage.py',\n'pages\\\\policyCreatePage.py',\n'pages\\\\reportDesignPage.py',\n'common\\\\param.py'\n],\n pathex=['D:\\\\doc\\\\pd_code\\\\AutoTest\\\\test\\\\shujia'],\n binaries=[('C:\\\\Program Files\\\\Google\\\\Chrome\\\\Application\\\\chromedriver.exe','.'),\n \n ('D:\\\\Program Files\\\\Python38\\\\Lib\\\\site-packages\\\\allure_pytest-2.8.18.dist-info','allure_pytest-2.8.18.dist-info'),\n ('D:\\\\Program Files\\\\Python38\\\\Lib\\\\site-packages\\\\allure-2.7.0','allure-2.7.0'),\n ('D:\\\\Program Files\\\\Python38\\\\Lib\\\\site-packages\\\\pytest_rerunfailures-9.0.dist-info','pytest_rerunfailures-9.0.dist-info'),\n ('D:\\\\doc\\\\pd_code\\\\AutoTest\\\\test\\\\shujia\\\\to_html.exe','.')\n ],\n datas=[\n (SETUP_DIR+'file','file'),(SETUP_DIR+'testcase','testcase'),(SETUP_DIR+'conftest.py','.'),('D:\\\\Program Files\\\\Python38\\\\Lib\\\\site-packages\\\\pytest_rerunfailures.py','.')\n ],\n hiddenimports=[],\n hookspath=[],\n runtime_hooks=[],\n excludes=[],\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher,\n noarchive=False)\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\na.datas +=[('zc.ico','D:\\\\doc\\\\pd_code\\\\AutoTest\\\\test\\\\shujia\\\\zc.ico','DATA')]\nexe = EXE(pyz,\n a.scripts,\n [],\n exclude_binaries=True,\n name='main',\n debug=False,\n bootloader_ignore_signals=False,\n strip=False,\n upx=True,\n console=True )\ncoll = COLLECT(exe,\n a.binaries,\n a.zipfiles,\n a.datas,\n strip=False,\n upx=True,\n upx_exclude=[],\n name='main')\n","sub_path":"main.spec","file_name":"main.spec","file_ext":"spec","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"107136971","text":"import numpy\n\nmax_row = 0\nmax_column = 0\nresult_string = ''\n\n\ndef main():\n global max_row\n global max_column\n lines = []\n with open('./d19-task.txt', 'r') as reader:\n line = reader.readline()\n while line != '':\n line = line.replace(\"\\n\", \"\")\n if len(line) > max_column:\n max_column = len(line)\n lines.append(line)\n max_row += 1\n line = reader.readline()\n matrix = numpy.empty([max_row, max_column], dtype=str)\n for i in range(max_row):\n line = lines[i]\n for j in range(max_column):\n if j < len(line):\n matrix[i][j] = line[j]\n if i == 0 and line[j] == '|':\n start_position = j\n result = search(matrix, 0, start_position, 'down')\n counter = 0\n while True:\n result = search(matrix, result[0], result[1], result[2])\n counter += 1\n if result[2] == 'end':\n break\n print(counter)\n\n\ndef search(matrix, row, column, direction):\n global result_string\n if matrix[row][column] == '+':\n new_direction = check_cells(row, column, direction, matrix)\n return new_direction[1], new_direction[2], new_direction[0]\n elif matrix[row][column] not in ('|', '+', '-', '', ' '):\n result_string += matrix[row][column]\n elif matrix[row][column] in('', ' '):\n return 0, 0, 'end'\n if direction == 'down':\n return row + 1, column, 'down'\n elif direction == 'up':\n return row - 1, column, 
'up'\n    elif direction == 'right':\n        return row, column + 1, 'right'\n    elif direction == 'left':\n        return row, column - 1, 'left'\n\n\ndef check_cells(row, column, direction, matrix):\n    global max_row\n    global max_column\n    for i in (row - 1, row, row + 1):\n        if i >= max_row or i < 0:\n            continue\n        for j in (column - 1, column, column + 1):\n            if j >= max_column or j < 0:\n                continue\n            if row == i and column == j:\n                continue\n            if direction == 'down':\n                if i == row - 1 and j == column:\n                    continue\n            elif direction == 'up':\n                if i == row + 1 and j == column:\n                    continue\n            elif direction == 'right':\n                if i == row and j == column - 1:\n                    continue\n            elif direction == 'left':\n                if i == row and j == column + 1:\n                    continue\n            if matrix[i][j] != '' and matrix[i][j] != ' ':\n                if i == row:\n                    if j == column - 1:\n                        return 'left', i, j\n                    else:\n                        return 'right', i, j\n                elif i == row + 1:\n                    return 'down', i, j\n                else:\n                    return 'up', i, j\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Day19/d19-task2.py","file_name":"d19-task2.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"348794988","text":"import matplotlib as mpl\nfrom matplotlib import axes\nfrom matplotlib import pyplot as plt\nfrom matplotlib import gridspec\n\nimport math\nimport numpy as np\n\n\ndef plot_many_data_sets(data, labels, distances, mic_locations, mics, accessions, antibiotic, detector):\n\n    if len(data) <= 5:\n        rows = 1\n    elif 5 < len(data) <= 9:\n        rows = 2\n    else:\n        rows = math.ceil(math.sqrt(len(data)-1))  # GridSpec needs an integer row count\n\n    cols = math.ceil((len(data) - 1) / rows) if len(data) > 1 else 1\n\n    fig = plt.figure(figsize=(9, 6))\n    fig.suptitle(accessions[0] + ' for ' + antibiotic + ' and ' + detector, x=0.2)\n\n    gs = gridspec.GridSpec(rows+1, cols)\n\n    index = np.arange(len(labels))\n\n    ax_main_point = plt.subplot(gs[0, :])\n    _help_plot(ax_main_point, index, data[0], labels, accessions[0] + ' | MIC: ' + str(mics[0]), 'y', mic_locations[0])\n\n\n    for i, d in enumerate(data):\n        if i == 0:\n            continue\n\n        row_index = (i-1) // cols + 1\n\n        if rows == 1:\n            col_index = i-1\n        else:\n            col_index = (i-1) % cols\n\n        ax_i = plt.subplot(gs[row_index, col_index])\n        _help_plot(ax_i, index, d, labels, accessions[i] + ' | MIC: ' + str(mics[i]) + ' | Dist: ' + '{0:.2f}'.format(distances[i]), 'b', mic_locations[i])\n\n\n    fig.tight_layout()\n    return fig\n\n\ndef _help_plot(axis, index, data, labels, title, color_code, mic_location, mic_color='r'):\n    bars = axis.bar(index, data, color=color_code)\n    if mic_location is not None and len(bars) > mic_location:\n        bars[mic_location].set_color(mic_color)\n    axis.set_title(title)\n    axis.set_xticks(index)\n    axis.set_xticklabels(labels=labels)","sub_path":"algorithm development/data_quality_run/nearest_neighbors/plot_nearest_neighbors.py","file_name":"plot_nearest_neighbors.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"431521123","text":"import zipfile\nimport urllib.request\nresponse = urllib.request.urlopen('http://revistas.inpi.gov.br/txt/P2095.zip')\ndata = response.read()\n \n# Write data to file\nfilename = \"P2095.zip\"\nfile_ = open(filename, 'w+b')\nfile_.write(data)\nfile_.close()\n\nz = zipfile.ZipFile(\"P2095.zip\")\nz.extractall()\n","sub_path":"inpi.py","file_name":"inpi.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} 
+{"seq_id":"478125329","text":"import networkx as nx\nfrom matplotlib import pyplot as plt\nimport sys\nread=lambda:sys.stdin.readline().strip()\nwrite=lambda x:sys.stdout.write(str(x)+\"\\n\")\nG = nx.Graph()\nV, E = map(int, read().split())\nG.add_nodes_from([i for i in range(1, V+1)])\nfor i in range(E):\n    u, v, w = (int(n) for n in read().split())\n    G.add_edge(u, v, weight=w)\n\npos = nx.spring_layout(G)\nnx.draw_networkx_nodes(G, pos)\nnx.draw_networkx_edges(G, pos, edgelist=G.edges(), width=3, alpha=.5)\nnx.draw_networkx_labels(G, pos, {n:n for n in G.nodes}, font_size=12)\nplt.show()\n\n\n'''\ninput example\n7 14\n1 2 2\n1 3 4\n1 4 1\n1 6 5\n2 3 4\n3 4 2\n4 6 3\n2 7 3\n3 7 1\n2 5 5\n3 5 6\n4 5 3\n5 6 4\n5 7 4\n'''","sub_path":"networkx/drawing_practice.py","file_name":"drawing_practice.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"387419024","text":"import csv\nimport sys\nfrom netaddr import *\n\ninfile = sys.stdin\noutfile = sys.stdout\n\nr = csv.DictReader(infile)\nheader = r.fieldnames\n\nw = csv.DictWriter(outfile, fieldnames=r.fieldnames + ['all_ips'])  # include the extra column so DictWriter accepts it\nw.writeheader()\n\nfor result in r:\n\tr1 = IPRange(result['low_ip'], result['high_ip'])\n\taddrs = list(r1)\n\tfor ip in addrs:\n\t\tresult['all_ips'] = str(ip)\n\t\tw.writerow(result)\n\n\t","sub_path":"powertools/bin/iprange.py","file_name":"iprange.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"461920310","text":"# Print the value of the smallest odd element of the list;\n# it is guaranteed that the list contains at least one odd element.\n\n\ndef isMin(xList):\n    i = 0\n    while xList[i] % 2 == 0:\n        i += 1\n    minX = xList[i]\n\n    for j in range(i, len(xList)):\n        if xList[j] % 2 != 0 and minX > xList[j]:\n            minX = xList[j]\n    return minX\n\n\nxList2 = list(map(int, input().split()))\nprint(isMin(xList2))\n","sub_path":"Week5/Наименьший нечетный.py","file_name":"Наименьший нечетный.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"569193658","text":"import csv\n\nimport requests\nfrom config import VK_API_TOKEN\n\n\ndef get_wall_posts(domain):\n    wall_get_method = 'https://api.vk.com/method/wall.get'\n    version = 5.103\n    count = 10\n\n    params = {\n        'access_token': VK_API_TOKEN,\n        'v': version,\n        'domain': domain,\n        'count': count,\n    }\n\n    all_posts = []\n\n    for offset in range(0, 1000, 100):\n        params['offset'] = offset\n        response = requests.get(wall_get_method, params=params)\n        data = response.json()['response']['items']\n        all_posts.extend(data)\n\n    return all_posts\n\n\ndef file_writer(data):\n    with open('result.csv', 'w') as file:\n        pen = csv.writer(file)\n        pen.writerow(('likes', 'body', 'url'))\n        for post in data:\n            if ('attachments' in post.keys()) and post['attachments'][0]['type'] == 'photo':\n                img_url = post['attachments'][0]['photo']['sizes'][-1]['url']\n            else:\n                img_url = 'pass'\n\n            pen.writerow((post['likes']['count'], post['text'], img_url))\n\n\ngroup_domain = input()\nposts = get_wall_posts(group_domain)\nfile_writer(posts)\n","sub_path":"WallGetPars.py","file_name":"WallGetPars.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"613652746","text":"# The surface of the Earth is curved, and the distance between degrees of longitude 
varies with latitude\r\n# As a result, finding the distance between two points on the surface\r\n# of the Earth is more complicated than simply using the Pythagorean theorem.\r\n# Let (t1, g1) and (t2, g2) be the latitude and longitude of two points on the Earth’s\r\n# surface. The distance between these points, following the surface of the Earth, in\r\n# kilometers is: distance = 6371.01 × arccos(sin(t1) × sin(t2) + cos(t1) × cos(t2) × cos(g1 − g2))\r\n# The value 6371.01 in the previous equation wasn’t selected at random.\r\n# It is the average radius of the Earth in kilometers.\r\n# Create a program that allows the user to enter the latitude and longitude of two\r\n# points on the Earth in degrees. Your program should display the distance between\r\n# the points, following the surface of the earth, in kilometers.\r\n\r\n# Hint: Python’s trigonometric functions operate in radians. As a result, you will\r\n# need to convert the user’s input from degrees to radians before computing the\r\n# distance with the formula discussed previously. The math module contains a\r\n# function named radians which converts from degrees to radians.\r\n\r\n##\r\n# Calculate distance between 2 points, following the surface of the earth in kilometers\r\n#\r\n\r\nimport math\r\nfrom math import sin\r\nfrom math import cos\r\n\r\n# Read input from the user\r\n\r\ndegrees_1 = float(input(\"Enter the first latitude: \"))\r\ndegrees_2 = float(input(\"Enter the first longitude: \"))\r\n\r\ndegrees_3 = float(input(\"Enter the second latitude: \"))\r\ndegrees_4 = float(input(\"Enter the second longitude: \"))\r\n\r\n# Convert from degrees to radians using math.radians, as the hint suggests\r\n\r\nt1 = math.radians(degrees_1)\r\ng1 = math.radians(degrees_2)\r\nt2 = math.radians(degrees_3)\r\ng2 = math.radians(degrees_4)\r\n\r\n# Compute the distance between the 2 points\r\n\r\ndistance = 6371.01 * math.acos(sin(t1) * sin(t2) + cos(t1) * cos(t2) * cos(g1 - g2))\r\n\r\n# Display the result\r\n\r\nprint(\"Distance between the 2 points on the surface of the earth in kilometers: %.2f\" % distance, \"km\")\r\n\r\n","sub_path":"Exercise_012_Distance_Between_Two_Points_on_Earth.py","file_name":"Exercise_012_Distance_Between_Two_Points_on_Earth.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"393308274","text":"from ..attacks.attack import Attack\nfrom ..classifiers.classifier import Classifier\nfrom ..utils import data_utils\nfrom ..utils.trainer import trainer\nfrom ..models.tf.conv_net_meta_classifier import ConvNetMetaClassifier\nfrom ..utils.model_utils import copy_and_reset_model\nfrom ..output.user_output_property_inference_attack import (\n    UserOutputPropertyInferenceAttack,\n)\n\nimport numpy as np\nimport torch\nfrom torch import nn\nimport tensorflow as tf\nimport logging\nfrom tqdm.contrib.logging import logging_redirect_tqdm\nfrom tqdm import tqdm\nimport sys\nfrom typing import Tuple, Dict, List, Union\nfrom art.estimators.classification import TensorFlowV2Classifier, PyTorchClassifier\nfrom collections import OrderedDict\nimport warnings\n\n# count of shadow training sets, must be even\nAMOUNT_SETS = 2\n# ratio and size for unbalanced data sets\nSIZE_SET = 1000\n# ratios for different properties in sub-attacks\nRATIOS_FOR_ATTACK = [\n    0.05,\n    0.1,\n    0.15,\n    0.2,\n    0.25,\n    0.3,\n    0.35,\n    0.4,\n    0.45,\n    0.55,\n    0.6,\n    0.65,\n    0.7,\n    0.75,\n    0.8,\n    0.85,\n    0.9,\n    0.95,\n]\n# classes the attack should be performed on\nCLASSES = [0, 1]\n\n\nclass 
PropertyInferenceAttack(Attack):\n    def __init__(\n        self,\n        target_model: Classifier,\n        dataset: Tuple[np.ndarray, np.ndarray],\n        amount_sets: int = AMOUNT_SETS,\n        size_set: int = SIZE_SET,\n        ratios_for_attack: List[float] = RATIOS_FOR_ATTACK,\n        classes: List[int] = CLASSES,\n        verbose: int = 0,\n    ):\n        \"\"\"\n        Initialize the Property Inference Attack Class.\n        :param target_model: the target model to be attacked\n        :param dataset: dataset for training of shadow classifiers, given as the\n            concatenation [test_features, test_labels] of the test data\n        :param amount_sets: count of shadow training sets, must be even\n        :param size_set: ratio and size for unbalanced data sets\n        :param ratios_for_attack: ratios for different properties in sub-attacks\n        :param classes: classes the attack should be performed on\n        :param verbose: 0: no information; 1: backbone (most important) information; 2: fully detailed information will be printed\n        \"\"\"\n        self.logger = logging.getLogger(__name__)\n        if verbose == 2:\n            level = logging.DEBUG\n        elif verbose == 1:\n            level = logging.INFO\n        else:\n            level = logging.WARNING\n\n        self.logger.setLevel(level)\n\n        if not (\n            isinstance(dataset, tuple)\n            and list(map(type, dataset)) == [np.ndarray, np.ndarray]\n        ):\n            raise TypeError(\"Dataset type should be of shape (np.ndarray, np.ndarray).\")\n\n        self.dataset = dataset\n        if not (\n            isinstance(target_model, TensorFlowV2Classifier)\n            or isinstance(target_model, PyTorchClassifier)\n        ):\n            raise TypeError(\"Target model must be of type Classifier.\")\n\n        # count of shadow training sets, must be even\n        self.amount_sets = amount_sets\n        if self.amount_sets % 2 != 0 or self.amount_sets < 2:\n            raise ValueError(\n                \"Number of shadow classifiers must be even and greater than 1.\"\n            )\n        self.classes = classes\n        if len(self.classes) != 2:\n            raise ValueError(\"Currently attack only works with two classes.\")\n        for class_number in self.classes:\n            if class_number not in dataset[1]:\n                raise ValueError(f\"Class {class_number} does not exist in dataset.\")\n\n        self.size_set = size_set\n        for i in classes:\n            length_class = len((np.where(dataset[1] == i))[0])\n            if length_class < size_set:\n                size_set_old = size_set\n                size_set = length_class\n                self.size_set = size_set  # keep the attribute in sync with the shrunken set size\n                warning_message = (\n                    \"Warning: Number of samples for class {} is {}. \"\n                    \"This is smaller than the given size set ({}). 
\"\n \"{} is now the new size set.\"\n ).format(i, length_class, size_set_old, size_set)\n warnings.warn(warning_message)\n self.ratios_for_attack = ratios_for_attack\n\n if len(ratios_for_attack) < 1:\n raise ValueError(\n \"Ratios for different properties in sub-attacks can't have length zero.\"\n )\n\n self.input_shape = self.dataset[0][0].shape # [32, 32, 3] for CIFAR10\n\n super().__init__(target_model, None, None, None, None)\n\n def create_shadow_training_set(\n self,\n num_elements_per_class: Dict[int, int],\n ) -> List[Tuple[np.ndarray, np.ndarray]]:\n \"\"\"\n Create the shadow training sets with given ratio.\n The function works for the specific binary case that the ratio is a fixed distribution\n specified in the input.\n :param num_elements_per_class: number of elements per class\n :return: shadow training sets for given ratio\n \"\"\"\n\n training_sets = []\n\n # Creation of shadow training sets with the size dictionaries\n # amount_sets divided by 2 because amount_sets describes the total amount of shadow training sets.\n # In this function however only all shadow training sets of one type (follow property OR negation of property) are created, hence amount_sets / 2.\n self.logger.info(\"Creating shadow training sets\")\n\n for _ in tqdm(\n range(int(self.amount_sets / 2)),\n file=sys.stdout,\n disable=(self.logger.level > logging.INFO),\n ):\n shadow_training_sets = data_utils.new_dataset_from_size_dict(\n self.dataset, num_elements_per_class\n )\n training_sets.append(shadow_training_sets)\n\n return training_sets\n\n def train_shadow_classifiers(\n self,\n shadow_training_sets: List[Tuple[np.ndarray, np.ndarray]],\n num_elements_per_classes: Dict[int, int],\n ):\n \"\"\"\n Train shadow classifiers with each shadow training set (follows property or negation of property).\n :param shadow_training_sets: datasets fulfilling the a specific ratio to train shadow_classifiers\n :param num_elements_per_classes: specific class distribution\n :return: list of shadow classifiers,\n accuracies for the classifiers\n :rtype: Tuple[ List[:class:.art.estimators.estimator.BaseEstimator]\n \"\"\"\n\n shadow_classifiers = []\n\n num_classes = len(num_elements_per_classes)\n self.logger.info(\"Training shadow classifiers\")\n with logging_redirect_tqdm():\n for shadow_training_set in tqdm(\n shadow_training_sets,\n file=sys.stdout,\n disable=(self.logger.level > logging.INFO),\n ):\n model = copy_and_reset_model(self.target_model)\n trainer(\n shadow_training_set,\n num_elements_per_classes,\n model,\n self.logger,\n )\n\n # change pytorch classifier to art classifier\n art_model = Classifier._to_art_classifier(\n model,\n \"sparse_categorical_crossentropy\",\n num_classes,\n self.input_shape,\n )\n shadow_classifiers.append(art_model)\n\n return shadow_classifiers\n\n def create_shadow_classifier_from_training_set(\n self, num_elements_per_classes: Dict[int, int]\n ) -> list:\n # create training sets\n shadow_training_sets = self.create_shadow_training_set(num_elements_per_classes)\n\n # create classifiers with trained models based on given data set\n shadow_classifiers = self.train_shadow_classifiers(\n shadow_training_sets,\n num_elements_per_classes,\n )\n return shadow_classifiers\n\n @staticmethod\n def feature_extraction(model):\n \"\"\"\n Extract the features of a given model.\n :param model: a model from which the features should be extracted\n :type model: :class:`.art.estimators.estimator.BaseEstimator`\n # BaseEstimator is very general and could be specified to art.classifier\n 
:return: feature extraction\n :rtype: np.ndarray\n \"\"\"\n\n # Filter out all trainable parameters (from every layer)\n # This works differently for PyTorch and TensorFlow. Raise TypeError if model is\n # neither of both.\n if isinstance(model.model, torch.nn.Module):\n model_parameters = list(\n filter(lambda p: p.requires_grad, model.model.parameters())\n )\n # Store the remaining parameters in a concatenated 1D numPy-array\n model_parameters = np.concatenate(\n [el.cpu().detach().numpy().flatten() for el in model_parameters]\n ).flatten()\n return model_parameters\n\n elif isinstance(model.model, tf.keras.Model):\n model_parameters = np.concatenate(\n [el.numpy().flatten() for el in model.model.trainable_variables]\n ).flatten()\n return model_parameters\n else:\n raise TypeError(\n \"Expected model to be an instance of {} or {}, received {} instead.\".format(\n str(torch.nn.Module), str(tf.keras.Model), str(type(model.model))\n )\n )\n\n def create_meta_training_set(\n self, classifier_list_with_property, classifier_list_without_property\n ):\n \"\"\"\n Create meta training set out of shadow classifiers.\n :param classifier_list_with_property:\n list of all shadow classifiers that were trained on a dataset which fulfills the property\n :type classifier_list_with_property:\n iterable object of :class:`.art.estimators.estimator.BaseEstimator`\n :param classifier_list_without_property:\n list of all shadow classifiers that were trained on a dataset which does NOT fulfill the\n property\n :type classifier_list_without_property:\n iterable object of :class:`.art.estimators.estimator.BaseEstimator`\n :return: tuple (Meta-training set, label set)\n :rtype: tuple (np.ndarray, np.ndarray)\n \"\"\"\n # Apply self.feature_extraction on each shadow classifier and concatenate all features\n # into one array\n feature_list_with_property = np.array(\n [\n self.feature_extraction(classifier)\n for classifier in classifier_list_with_property\n ]\n )\n feature_list_without_property = np.array(\n [\n self.feature_extraction(classifier)\n for classifier in classifier_list_without_property\n ]\n )\n meta_features = np.concatenate(\n [feature_list_with_property, feature_list_without_property]\n )\n # Create corresponding labels\n meta_labels = np.concatenate(\n [\n np.ones(len(feature_list_with_property), dtype=int),\n np.zeros(len(feature_list_without_property), dtype=int),\n ]\n )\n\n return meta_features, meta_labels\n\n def train_meta_classifier(\n self, meta_training_X: np.ndarray, meta_training_y: np.ndarray\n ) -> TensorFlowV2Classifier:\n \"\"\"\n Train meta-classifier with the meta-training set.\n :param meta_training_X: Set of feature representation of each shadow classifier.\n :param meta_training_y: Set of labels for each shadow classifier,\n according to whether property is fullfilled (1) or not (0)\n :return: Art Meta classifier\n \"\"\"\n # reshaping train data to fit models input\n meta_training_X = meta_training_X.reshape(\n (meta_training_X.shape[0], meta_training_X[0].shape[0], 1)\n )\n meta_training_y = meta_training_y.reshape((meta_training_y.shape[0], 1))\n meta_input_shape = meta_training_X[0].shape\n\n # currently there are just 2 classes\n nb_classes = len(self.classes)\n\n inputs = tf.keras.Input(shape=meta_input_shape)\n\n # create model according to model from https://arxiv.org/pdf/2002.05688.pdf\n cnmc = ConvNetMetaClassifier(inputs=inputs, num_classes=nb_classes)\n\n cnmc.model.compile(\n loss=\"sparse_categorical_crossentropy\",\n optimizer=\"adam\",\n 
metrics=[\"accuracy\"],\n )\n\n # keras functional API provides a verbose variable ranging from {0, 1, 2}.\n # logging uses levels in our case corresponding to numeric values from {30, 20, 10}.\n # We can therefore convert our self.logger.level to the appropriate verbose value in the following manner:\n verbose = 3 - int(self.logger.level / 10)\n\n cnmc.model.fit(\n x=meta_training_X,\n y=meta_training_y,\n epochs=2,\n batch_size=128,\n verbose=verbose\n # If enough shadow classifiers are available, one could split the training set\n # and create an additional validation set as input:\n # validation_data = (validation_X, validation_y),\n )\n\n # model has .evaluate(test_X,test_y) function\n # convert model to ART classifier\n art_meta_classifier = Classifier._to_art_classifier(\n cnmc.model,\n loss=\"sparse_categorical_crossentropy\",\n nb_classes=nb_classes,\n input_shape=meta_input_shape,\n )\n\n return art_meta_classifier\n\n @staticmethod\n def perform_prediction(\n meta_classifier, feature_extraction_target_model\n ) -> np.ndarray:\n \"\"\"\n \"Actual\" attack: Meta classifier gets feature extraction of target model as input, outputs\n property prediction.\n :param meta_classifier: A classifier\n :type meta_classifier: \"CLASSIFIER_TYPE\" (to be found in .art.estimators)\n :param feature_extraction_target_model: extracted features of target model\n :type feature_extraction_target_model: np.ndarray\n :return: Prediction given as probability distribution vector whether property or negation\n of property is fulfilled for target data set\n :rtype: np.ndarray with shape (1, 2)\n \"\"\"\n\n feature_extraction_target_model = feature_extraction_target_model.reshape(\n (feature_extraction_target_model.shape[0], 1)\n )\n\n assert meta_classifier.input_shape == tuple(\n feature_extraction_target_model.shape\n )\n\n predictions = meta_classifier.predict(x=[feature_extraction_target_model])\n return predictions\n\n def output_attack_results(\n self, predictions_ratios\n ) -> UserOutputPropertyInferenceAttack:\n \"\"\"\n Determination of prediction with highest probability.\n :param predictions_ratios: Prediction values from meta-classifier for different subattacks (different properties)\n :type predictions_ratios: OrderedDict[float, np.ndarray]\n :return: Output message for the attack\n \"\"\"\n\n # get key & value of ratio with highest property probability\n max_property = max(predictions_ratios.items(), key=lambda item: item[1][0][0])\n\n output = dict()\n # rounding because calculation creates values like 0.499999999 when we expected 0.5\n for ratio in predictions_ratios:\n key = \"class {}: {}, class {}: {}\".format(\n self.classes[0], round(1 - ratio, 5), self.classes[1], round(ratio, 5)\n )\n output[key] = predictions_ratios[ratio][0][0]\n\n if len(self.ratios_for_attack) >= 2:\n max_message = (\n \"The most probable property is class {}: {}, \"\n \"class {}: {} with a probability of {}.\".format(\n self.classes[0],\n round(1 - max_property[0], 5),\n self.classes[1],\n round(max_property[0], 5),\n predictions_ratios[max_property[0]][0][0],\n )\n )\n else:\n if list(predictions_ratios.values())[0][0][0] > 0.5:\n max_message = \"The given distribution is more likely than a balanced distribution. \" \"The given distribution is class {}: {}, class {}: {}\".format(\n self.classes[0],\n round(1 - self.ratios_for_attack[0], 5),\n self.classes[1],\n round(self.ratios_for_attack[0], 5),\n )\n else:\n max_message = \"A balanced distribution is more likely than the given distribution. 
\" \"The given distribution is class {}: {}, class {}: {}\".format(\n self.classes[0],\n round(1 - self.ratios_for_attack[0], 5),\n self.classes[1],\n round(self.ratios_for_attack[0], 5),\n )\n if abs(list(predictions_ratios.values())[0][0][0] - 0.5) <= 0.05:\n warnings.warn(\n \"The probabilities are very close to each other. The prediction is likely to be a random guess.\"\n )\n\n return UserOutputPropertyInferenceAttack(max_message, output)\n\n def prediction_on_specific_property(\n self,\n feature_extraction_target_model: np.ndarray,\n shadow_classifiers_neg_property: list,\n ratio: float,\n ) -> np.ndarray:\n \"\"\"\n Perform prediction for a subattack (specific property)\n :param feature_extraction_target_model: extracted features of target model\n :param shadow_classifiers_neg_property: balanced shadow classifiers negation property\n :param ratio: distribution for the property\n :return: Prediction of meta-classifier for property and negation property\n \"\"\"\n\n # property of given ratio, only two classes allowed right now\n property_num_elements_per_classes = {\n self.classes[0]: int((1 - ratio) * self.size_set),\n self.classes[1]: int(ratio * self.size_set),\n }\n\n # create shadow classifiers with trained models with unbalanced data set\n shadow_classifiers_property = self.create_shadow_classifier_from_training_set(\n property_num_elements_per_classes\n )\n\n # create meta training set\n meta_features, meta_labels = self.create_meta_training_set(\n shadow_classifiers_property, shadow_classifiers_neg_property\n )\n\n # create meta classifier\n meta_classifier = self.train_meta_classifier(meta_features, meta_labels)\n\n # get prediction\n prediction = self.perform_prediction(\n meta_classifier, feature_extraction_target_model\n )\n\n return prediction\n\n def attack(self) -> UserOutputPropertyInferenceAttack:\n \"\"\"\n Performs Property Inference attack.\n :return: message with most probable property, dictionary with all properties\n \"\"\"\n self.logger.info(\"Initiating Property Inference Attack ... \")\n self.logger.info(\"Extracting features from target model ... \")\n # extract features of target model\n feature_extraction_target_model = self.feature_extraction(self.target_model)\n\n self.logger.info(\n \"{} --- features extracted from the target model.\".format(\n feature_extraction_target_model.shape\n ),\n )\n\n # balanced ratio\n num_elements = int(round(self.size_set / len(self.classes)))\n neg_property_num_elements_per_class = {i: num_elements for i in self.classes}\n\n self.logger.info(\n \"Creating set of {} balanced shadow classifiers ... \".format(\n int(self.amount_sets / 2)\n ),\n )\n # create balanced shadow classifiers negation property\n shadow_classifiers_neg_property = (\n self.create_shadow_classifier_from_training_set(\n neg_property_num_elements_per_class\n )\n )\n\n self.ratios_for_attack.sort()\n predictions = OrderedDict.fromkeys(self.ratios_for_attack, 0)\n # iterate over unbalanced ratios in 0.05 steps (0.05-0.45, 0.55-0.95)\n # (e.g. 0.55 means: class 0: 0.45 of all samples, class 1: 0.55 of all samples)\n\n self.logger.info(\"Performing PIA for various ratios ... 
\")\n\n for ratio in tqdm(\n self.ratios_for_attack,\n file=sys.stdout,\n disable=(self.logger.level > logging.INFO),\n ):\n predictions[ratio] = self.prediction_on_specific_property(\n feature_extraction_target_model,\n shadow_classifiers_neg_property,\n ratio,\n )\n\n return self.output_attack_results(predictions)\n","sub_path":"privacy_evaluator/attacks/property_inference_attack.py","file_name":"property_inference_attack.py","file_ext":"py","file_size_in_byte":20953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"477895609","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 19 08:06:16 2019\n\n@author: acharch\n\"\"\"\n\nimport sys\n\nMaster_List = {\"Bill Gates\":[1000,2000,3000,4000,5000],\n \"Marc Zuckerberg\":[1005,2005,3005,4005,5005],\n \"Jeff Bezos\": [9000,12000],\n \"Warren Buffet\": [1100,2200,3300,4000,5000],\n \"Tim Cook\": [100000,300000,400000,500000],\n \"Sundar Pichai\":[1000],\n \"Satya Nadella\":[1900,45672,983564,7646,5000,1,2,3],\n \"Charith Acharya\":[10,80,24,621]}\n\nprompt = \"\\n\".join(('Welcome',\n 'Please select from the following options',\n '1.Send A Thank You!',\n '2.Create a report',\n '3.Send a letter to all participants',\n '4. Quit'))\ndef main():\n while True:\n response = int(input(prompt))\n SwitchFuncDict = {1:Thanks,2:Report,3:Letter_to_all,4:Quit}\n SwitchFuncDict.get(response)()\n\ndef Thanks():\n Full_Name = input(\"Please enter the full name\".title())\n New_Amount = input(\"Please enter new amount\")\n Amount = int(New_Amount)\n if Full_Name in Master_List.keys():\n Master_List[Full_Name].append(Amount)\n print(Message(Full_Name, New_Amount)) \n elif Full_Name not in Master_List.keys():\n Master_List[Full_Name] = [Amount]\n print(Message(Full_Name, New_Amount)) \n \ndef Message(Full_Name, New_Amount):\n return(\" \".join([\"Hello\",Full_Name,\"Thank you for the donation of\",New_Amount]))\n \n \ndef donor_details():\n \"\"\"Print donation statistics for each donor\"\"\"\n Master_List_2 = {}\n Master_List_2 = {k:[sum(v),len(v), sum(v)/len(v)] for k,v in Master_List.items()}\n Master_List_3 = sorted(Master_List_2 .items(), key = lambda x: x[1], reverse = True)\n for donor in Master_List_3:\n print(\"{:<20} ${:>12,.2f}{:^12} ${:>12,.2f}\".format(donor[0], donor[1][0], donor[1][1], donor[1][2]))\n \n \ndef Report():\n \"\"\"Print a list of donors sorted by name, total donated amount, number of donation, and average donation amount\"\"\"\n print(\"{0:<20}{1:>12}{2:>12}{3:>15}\".format(\"Donor Name\", \"Total Given\", \"Num Gifts\", \"Average Gift\"))\n print(\"--------------------------------------------------------------\")\n donor_details()\n\n\ndef Report_Totals(i):\n SumI = sum(Master_List[i])\n return(SumI)\n\n\n\ndef Letter_to_all():\n for i in range(len(Master_List)):\n with open('Letter{}.txt'.format(list(Master_List.keys())[i]),'w') as f:\n f.write(\"Hello\")\n f.write(list(Master_List.keys())[i])\n f.write(\"Thank you for your donation of\")\n f.write(str(sum(list(Master_List.values())[i])))\n \ndef Quit():\n print(\"Bye!\")\n sys.exit()\n \nif __name__ == \"__main__\":\n main()\n \n ","sub_path":"students/Charith_Acharya1/Lesson6.py","file_name":"Lesson6.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"341109455","text":"# coding=utf-8\nimport json\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\n\n\nclass HumanTopology:\n def __init__(self):\n pass\n\n 
@staticmethod\n    def print_topology(graph, check_constraint=None, save_figure=False):\n        if check_constraint is None:\n            nx.draw(graph, with_labels=True)\n            plt.show()\n            return\n        # build the node labels\n        labels = {}\n        for l in graph.adjacency():\n            labels[l[0]] = l[0]\n\n        # get the edge weights from the graph\n        edge_labels = nx.get_edge_attributes(graph, check_constraint)\n        print('weight of all edges:', edge_labels)\n\n        # compute the node positions\n        pos = nx.circular_layout(graph)\n        print('position of all nodes:', pos)\n\n        node_color = []\n        for i in range(0, len(graph)):\n            node_color.append('b')\n        # if len(graph) >= 2:\n        #     node_color.append('r')\n\n        # draw the nodes\n        nx.draw_networkx_nodes(graph, pos, node_color=node_color, node_size=500, alpha=0.8)\n\n        # draw the edges\n        nx.draw_networkx_edges(graph, pos, width=1.0, alpha=0.5, edge_color='black')\n\n        # draw the node labels\n        nx.draw_networkx_labels(graph, pos, labels, font_size=16)\n\n        # draw the edge weight labels\n        nx.draw_networkx_edge_labels(graph, pos, edge_labels)\n\n        plt.title(\"network's topology of \" + check_constraint)\n\n        plt.axis('on')\n        # remove the axis ticks\n        plt.xticks([])\n        plt.yticks([])\n\n        if save_figure:\n            plt.savefig(check_constraint + \".png\")  # output option 1: save the figure as a png image file\n\n        plt.show()\n\n    @staticmethod\n    def init_topology(path=None):\n        # build the graph from an mn (json) topology file\n        graph = None\n        if path is not None:\n            f = None\n            try:\n                f = open(path, 'r')  # open the file\n                json_string = f.read()  # read the file contents\n                parsed_json = json.loads(json_string)\n                graph = nx.Graph()\n                for switch in parsed_json['switches']:\n                    graph.add_node(switch['opts']['hostname'].encode(\"utf8\"))\n\n                for link in parsed_json['links']:\n                    if link['src'].encode(\"utf8\").startswith('h') \\\n                        or link['dest'].encode(\"utf8\").startswith('h'):\n                        continue\n                    graph.add_edge(link['src'].encode(\"utf8\"), link['dest'].encode(\"utf8\"))\n                    for key, value in link['opts'].items():\n                        if key.encode(\"utf8\") == 'bw':\n                            int_value = value\n                        else:\n                            int_value = int(value.encode(\"utf8\"))\n                        graph[link['src'].encode(\"utf8\")][link['dest'].encode(\"utf8\")] \\\n                            [key.encode(\"utf8\")] = int_value\n            finally:\n                if f:\n                    f.close()  # make sure the file is closed\n            return graph\n        return graph\n","sub_path":"ryu/app/network_awareness/algorithms/humanTopology.py","file_name":"humanTopology.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"280312020","text":"import torch\nimport torch.optim\nimport torch.nn.functional as F\n\nimport cv2\nimport os.path\nimport time\nimport os\nimport glob\nimport numpy as np\nfrom datetime import datetime\nfrom scipy.signal import convolve2d\nfrom nuocnet.models.usrnet import USRNet as net\nimport matplotlib.pyplot as plt\nimport nuocnet.utils.utils_image as util\nimport nuocnet.utils.utils_deblur as util_deblur\n\ndef make_size_divisible(img,stride):\n\tw,h,_ = img.shape\n\n\tw_new = w//stride*stride\n\th_new = h//stride*stride\n\n\treturn img[:w_new,:h_new,:]\n\ndef main():\n\t# ----------------------------------------\n\t# load kernels\n\t# ----------------------------------------\n\tPSF_grid = np.load('./data/Schuler_PSF01.npz')['PSF']\n\t#PSF_grid = np.load('./data/Schuler_PSF02.npz')['PSF']\n\t#PSF_grid = np.load('./data/Schuler_PSF03.npz')['PSF']\n\t#PSF_grid = np.load('./data/PSF.npz')['PSF']\n\t#print(PSF_grid.shape)\n\t\n\tPSF_grid = PSF_grid.astype(np.float32)\n\n\tgx,gy = PSF_grid.shape[:2]\n\tfor xx in range(gx):\n\t\tfor yy in range(gy):\n\t\t\tPSF_grid[xx,yy] = PSF_grid[xx,yy]/np.sum(PSF_grid[xx,yy],axis=(0,1))\n\n\t# ----------------------------------------\n\t# load model\n\t# ----------------------------------------\n\tdevice 
= torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\tmodel = net(n_iter=8, h_nc=64, in_nc=4, out_nc=3, nc=[64, 128, 256, 512],\n\t\t\t\t\tnb=2, act_mode=\"R\", downsample_mode='strideconv', upsample_mode=\"convtranspose\")\n\tmodel.load_state_dict(torch.load('usrnet_bench.pth'), strict=True)\n\tmodel.eval()\n\t#model.train()\n\tfor _, v in model.named_parameters():\n\t\tv.requires_grad = False\n\t#\tv.requires_grad = False\n\tmodel = model.to(device)\n\n\n\n\timg_L = cv2.imread('/home/xiu/workspace/dwgan/MPI_data/bench/blurry.jpg')\n\timg_H = cv2.imread('/home/xiu/workspace/dwgan/MPI_data/bench/schuler.jpg')\n\tprint(img_L.shape)\n\t#10x6\n\t#1097x730\n\t#109.7,12\n\n\n\tpatch_size = 2*256\n\tnum_patch = 2\n\texpand = PSF_grid.shape[2]//2\n\tb_size = patch_size//num_patch\n\n\tab_numpy = np.loadtxt('ab_bench.txt').astype(np.float32)\n\tab_numpy = ab_numpy[...,None,None]\n\t#ab_numpy[:,0] = 0.01\n\t#ab_numpy[:,1] = 0.01\n\t#ab_numpy[:,2] = 0.01\n\n\tab = torch.tensor(ab_numpy,device=device,requires_grad=False)\n\t\n\trunning = True\n\n\n\twhile running:\n\t\t#alpha.beta\n\t\t#px_start = np.random.randint(0,PSF_grid.shape[0]//2+1)\n\t\t#py_start = np.random.randint(0,PSF_grid.shape[1]//2+1)\n\t\tpx_start = 0\n\t\tpy_start = 0\n\n\t\tPSF_patch = PSF_grid[px_start:px_start+num_patch,py_start:py_start+num_patch]\n\n\t\t# x = util.uint2single(patch_L)\n\t\tblock_size = patch_size//num_patch\n\t\tpatch_L = img_L[px_start*b_size:(px_start+num_patch)*b_size,py_start*b_size:py_start*b_size+patch_size,:]\n\t\tpatch_H = img_H[px_start*b_size:(px_start+num_patch)*b_size,py_start*b_size:py_start*b_size+patch_size,:]\n\t\t#block_expand = expand*2\n\t\tblock_expand = expand\n\t\t#block_expand = 1\n\t\tif block_expand > 0:\n\t\t\tpatch_L_wrap = util_deblur.wrap_boundary_liu(patch_L,(patch_size+block_expand*2,patch_size+block_expand*2))\n\t\t\t#centralize\n\t\t\tpatch_L_wrap = np.hstack((patch_L_wrap[:,-block_expand:,:],patch_L_wrap[:,:patch_size+block_expand,:]))\n\t\t\tpatch_L_wrap = np.vstack((patch_L_wrap[-block_expand:,:,:],patch_L_wrap[:patch_size+block_expand,:,:]))\n\t\telse:\n\t\t\tpatch_L_wrap = patch_L\n\t\tif block_expand>0:\n\t\t\tx = util.uint2single(patch_L_wrap)\n\t\telse:\n\t\t\tx = util.uint2single(patch_L)\n\t\tx_blocky = []\n\t\tfor h_ in range(num_patch):\n\t\t\tfor w_ in range(num_patch):\n\t\t\t\tx_blocky.append(x[w_*block_size:w_*block_size+block_size+block_expand*2,\\\n\t\t\t\t\th_*block_size:h_*block_size+block_size+block_expand*2:])\t\n\t\tx_blocky = [util.single2tensor4(el) for el in x_blocky]\n\t\tx_blocky = torch.cat(x_blocky,dim=0)\n\n\t\t# x = util.single2tensor4(x)\n\n\t\t# x_blocky = torch.cat(torch.chunk(x,num_patch,dim=2),dim=0)\n\t\t# x_blocky = torch.cat(torch.chunk(x_blocky,num_patch,dim=3),dim=0)\n\n\t\tk_all = []\n\t\tfor w_ in range(num_patch):\n\t\t\tfor h_ in range(num_patch):\n\t\t\t\tk_all.append(util.single2tensor4(PSF_patch[h_,w_]))\n\t\tk = torch.cat(k_all,dim=0)\n\n\t\t[x_blocky,k] = [el.to(device) for el in [x_blocky,k]]\n\n\t\tx_E = model.forward_patchdeconv(x_blocky,k,ab,[num_patch,num_patch],patch_sz=patch_size//num_patch)\n\t\tx_E = x_E[:-1]\n\n\t\tpatch_L = patch_L_wrap.astype(np.uint8)\n\n\t\tpatch_E = util.tensor2uint(x_E[-1])\n\t\tpatch_E_all = [util.tensor2uint(pp) for pp in x_E]\n\t\tpatch_E_z = np.hstack((patch_E_all[::2]))\n\t\tpatch_E_x = np.hstack((patch_E_all[1::2]))\n\n\t\tpatch_E_show = np.vstack((patch_E_z,patch_E_x))\n\t\tif block_expand>0:\n\t\t\tshow = 
np.hstack((patch_H,patch_L[block_expand:-block_expand,block_expand:-block_expand],patch_E))\n\t\telse:\n\t\t\tshow = np.hstack((patch_H,patch_L,patch_E))\n\n\t\t#get kernel\n\t\tcv2.imshow('stage',patch_E_show)\n\t\tcv2.imshow('HL',show)\n\t\tkey = cv2.waitKey(-1)\n\t\tif key==ord('n'):\n\t\t\tbreak\n\n\n\n\n\t\n\n\nif __name__ == '__main__':\n\n\tmain()\n","sub_path":"tests/test_bench.py","file_name":"test_bench.py","file_ext":"py","file_size_in_byte":4719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"109682852","text":"class Solution:\n    def merge(self, intervals):\n        out = []\n        # sort by the start of each interval\n        for i in sorted(intervals, key=lambda x:x[0]):\n            # overlapping interval: merge by extending the end in place\n            if out and i[0] <= out[-1][-1]:\n                out[-1][-1] = max(out[-1][-1], i[-1])\n            # otherwise append it to the result\n            else:\n                out += [i]\n        return out\n    \nif __name__ == \"__main__\":\n    s = Solution()\n    print(s.merge(intervals = [[1,3],[2,6],[8,10],[15,18]]))\n    ","sub_path":"056-Merge-Intervals/Merge-Intervals.py","file_name":"Merge-Intervals.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"602965137","text":"import numpy as np\nfrom env import *\nV = np.zeros(nS)\n\ndef best_value(s,V):\n    Vk_list = np.zeros(nA)\n    for a in range(nA):\n        for prob,next_state,reward,done in ENV[s][a]:\n            Vk_list[a]+=prob*(reward + gama*V[next_state])\n    return Vk_list\n\ndef value_iteration():\n    while True:\n        delta = 0\n        for s in range(nS):\n            best_action_value = max(best_value(s, V))\n            delta = max(delta, np.abs(best_action_value - V[s]))\n            V[s] = best_action_value\n\n        if delta < theta:\n            break\n\n    policy = np.zeros([nS, nA])\n    for s in range(nS):\n        A = best_value(s, V)\n        best_action = np.argmax(A)\n        policy[s, best_action] = 1.0\n    return policy\n\n\nprint(\"Policy Probability Distribution:\")\nprint(value_iteration())\nprint(\"\")\n","sub_path":"ValueIteration/value_iteration.py","file_name":"value_iteration.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"374373480","text":"#!/usr/bin/env python\n\nfrom __future__ import unicode_literals, print_function\n\nimport plac\nimport multiprocessing\nimport joblib\nimport time\nfrom os import path\nimport os\nimport bz2\nimport ujson\nfrom preshed.counter import PreshCounter\nfrom joblib import Parallel, delayed\nimport io\n\n\nimport spacy\nfrom spacy.en import English\nfrom spacy.strings import StringStore\nfrom spacy.attrs import ORTH\nfrom spacy.tokenizer import Tokenizer\nfrom spacy.vocab import Vocab\n\n#import madoka\n\nDEFAULT_CORES = multiprocessing.cpu_count()\n\ndef iter_comments(loc):\n    with io.open(loc, 'r') as file_:\n        for line in file_:\n            yield line\n\ndef parallelize(func, iterator, n_jobs, lang_class):\n    Parallel(n_jobs=n_jobs)(delayed(func)(*item, lang_class) for item in iterator)\n\ndef merge_counts(locs, out_loc):\n    string_map = StringStore()\n    counts = PreshCounter()\n    counts_docs = PreshCounter()\n    for loc in locs:\n        with io.open(loc, 'r', encoding='utf8') as file_:\n            for line in file_:\n                freq, word = line.strip().split('\\t', 1)\n                orth = string_map[word]\n                counts.inc(orth, int(freq))\n                counts_docs.inc(orth, 1)\n    with io.open(out_loc, 'w', encoding='utf8') as file_:\n        for orth, count in counts:\n            string = string_map[orth]\n            file_.write('%d\\t%d\\t%s\\n' % (count, counts_docs[orth], string))\n\ndef count_freqs(input_loc, output_loc, LangClass):\n    start = time.time()\n    
print('INFO: Processing ', input_loc)\n    vocab = LangClass.Defaults.create_vocab()\n    tokenizer = LangClass.Defaults.create_tokenizer()\n    #Tokenizer(vocab,path.join(LangClass.default_data_dir(), 'tokenizer'))\n\n    counts = PreshCounter()\n    for text in iter_comments(input_loc):\n        doc = tokenizer(text)\n        doc.count_by(ORTH, counts=counts)\n\n    with io.open(output_loc, 'w', encoding='utf8') as file_:\n        for orth, freq in counts:\n            string = tokenizer.vocab.strings[orth]\n            if not string.isspace():\n                file_.write('%d\\t%s\\n' % (freq, string))\n    end = time.time()-start\n    print('INFO: File {} took {} min '.format(input_loc, end/60))\n\n@plac.annotations(\n    input_loc=(\"Location of input file list\"),\n    freqs_dir=(\"Directory for frequency files\"),\n    output_loc=(\"Location for output file\"),\n    lang_name=(\"Language\"),\n    n_jobs=(\"Number of workers\", \"option\", \"n\", int),\n    skip_existing=(\"Skip inputs where an output file exists\", \"flag\", \"s\", bool),\n)\ndef main(input_loc, freqs_dir, output_loc, lang_name='en', n_jobs=DEFAULT_CORES, skip_existing=False):\n    tasks = []\n    outputs = []\n\n    LangClass = spacy.util.get_lang_class(lang_name)\n\n    for input_path in open(input_loc):\n        input_path = input_path.strip()\n        if not input_path:\n            continue\n        filename = input_path.split('/')[-1]\n        output_path = path.join(freqs_dir, filename.replace('bz2', 'freq'))\n        outputs.append(output_path)\n        if not path.exists(output_path) or not skip_existing:\n            tasks.append((input_path, output_path))\n\n    if tasks:\n        parallelize(count_freqs, tasks, n_jobs, LangClass)\n\n    print(\"INFO: Merging the counts to \", output_loc)\n    merge_counts(outputs, output_loc)\n\n\nif __name__ == '__main__':\n    plac.call(main)\n","sub_path":"training/word_freqs_new_language.py","file_name":"word_freqs_new_language.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"353337084","text":"import re\r\n\r\nquantity = int(input(\"Enter the number of Sigizmund's tickets, in the range 1 <= x <= 10^9: \"))\r\nif 1 <= quantity <= 10 ** 9:\r\n    tickets = input(\"Enter the tickets separated by spaces: \").split(' ')\r\n    \r\n    regex = r\"(^a[a-z0-9]{3}55661)\"\r\n    a = re.compile(regex) \r\n    \r\n    result = list(filter(a.match, tickets))\r\n    \r\n    if len(result) == 0:\r\n        print(-1)\r\n    else:\r\n        print(*result, sep=' ')","sub_path":"Practice/16/16.py","file_name":"16.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"251851027","text":"import json\r\nfrom bson import json_util\r\nfrom pymongo import MongoClient, errors\r\nimport numbers\r\n\r\nDEBUG = False\r\ndbName = \"\"\r\ncolName = \"\"\r\n\r\n# Notes: Might be a good idea to create a few models for our 'production' db.\r\n#        This way, we can ensure that the data being inserted into our db\r\n#        matches the format that we want. From the rubric, this doesn't look\r\n#        completely necessary right now though. \r\n\r\n\r\n# Supposedly MongoClient constructor won't raise ConnectionFailure or \r\n# ConfigurationError anymore. 
More info in the reference:\r\n# https://api.mongodb.com/python/current/api/pymongo/mongo_client.html\r\n\r\nclient = MongoClient('localhost', 27017)\r\n\r\nif DEBUG:\r\n db = client.db\r\n dbName = \"db\"\r\n collection = db.col\r\n colName = \"col\"\r\nelse:\r\n db = client.market\r\n dbName = \"market\"\r\n collection = db.stocks\r\n colName = \"stocks\"\r\n\r\n# print_docs(Cursor docs)\r\n# @params: Cursor document\r\n# @return: void\r\n# @_nodes: Just a helper method that prints all the documents\r\n# output by read_document\r\ndef print_docs(docs):\r\n for doc in docs:\r\n print(doc)\r\n\r\ndef insert_documents(data):\r\n \"\"\"\r\n Will insert documents into the picked collection.\r\n \r\n @params: dictionary or array\r\n\r\n @return: { \"ids\": [...ids] }\r\n \"\"\"\r\n \r\n docs = []\r\n result = { \"Error\": \"Must be a dict or list!\" }\r\n\r\n if isinstance(data, dict):\r\n docs.append(data)\r\n elif isinstance(data, list):\r\n # I won't go much into type checking, but this could fail if we get \r\n # list and it's not full of dictionaries.\r\n docs = data\r\n\r\n # Since we're not using d, this won't even be tried...I don't think.\r\n try:\r\n res = collection.insert_many(docs)\r\n # If the list is empty, this will throw an exception and the method\r\n # will return.\r\n ids = []\r\n for data in res.inserted_ids:\r\n ids.append(data)\r\n\r\n result = { \"ids\": ids }\r\n \r\n except Exception as e:\r\n result = { \"Error\": (\"%s\" % e) }\r\n \r\n return result\r\n \r\ndef read_document(k, v = None):\r\n \"\"\"\r\n Will find documents in the db using k, v or a dictionary.\r\n\r\n @params: { k, v } or dictionary\r\n\r\n @return: Cursor\r\n \"\"\"\r\n try:\r\n docsFound = 0\r\n if v is None:\r\n results = collection.find(k) \r\n else:\r\n results = collection.find({ k : v })\r\n \r\n docsFound = results.count()\r\n \r\n if docsFound < 1:\r\n error = [{ \"Error\": \"No documents found!\", \"alive\": False }]\r\n results = iter(error)\r\n\r\n except Exception as e:\r\n results = iter([{ \"Error\": (\"%s\" % e), \"alive\": False }])\r\n\r\n return results\r\n\r\n# update_document(string lk, Type lv, Dict document)\r\n# Will update a single document (rubric says 'a document' not documents).\r\n# @params: k, v, document # lookup key and value, and replacing document\r\n# @return: JSON of insert, else Mongo error \r\ndef update_document(k, v, document = None):\r\n \"\"\"\r\n Will update a single document, can upsert if necessary.\r\n\r\n @params: { k, v } or document and data for update\r\n\r\n @return: JSON of insert or MongoDB error.\r\n \"\"\"\r\n if document is None:\r\n doc = k\r\n document = v\r\n else:\r\n doc = { k: v }\r\n \r\n try:\r\n # Leaving this as update_one() because we're interested in upserting\r\n # as well. 
find_one_and_update() requires, well, to find one.\r\n update = collection.update_one(\r\n doc,\r\n { \"$set\" : document },\r\n upsert = True\r\n )\r\n\r\n result = next(read_document(k, v))\r\n\r\n # We'll attach some diagnostics into the payload going back to the\r\n # client.\r\n if update.upserted_id:\r\n result.update({ \"upserted\": True })\r\n\r\n # I tried the abort() method like you suggested, but ultimately I was\r\n # thinking that the server should take care of HTTP error responses,\r\n # while this API should only concern itself with contacting the db.\r\n # I'm not sure which is faster, but this *feels* like a best practice.\r\n except Exception as e:\r\n result = { \"Error\": (\"%s\" % e) }\r\n \r\n return result\r\n\r\ndef delete_document(k, v = None):\r\n \"\"\"\r\n Will only delete the first document found.\r\n\r\n @params: { k, v } or dictionary\r\n\r\n @return: JSON of deleted document or MongoDB error.\r\n \"\"\"\r\n try:\r\n cursor = read_document(k, v)\r\n\r\n result = next(cursor)\r\n\r\n # May need this for diagnostic, not sure:\r\n response = collection.delete_one({ \"_id\": result[\"_id\"] })\r\n \r\n except Exception as e:\r\n result = { \"Error\": (\"%s\" % e) }\r\n \r\n return result\r\n\r\n# _Notes: For the next few functions, we'll largely be using aggregation via\r\n# PyMongo. I don't think it's necessary to create an aggregation method\r\n# because I'll end up passing in a pipline anyway, and it's just a\r\n# single call to PyMongo. If I have extra time, I'll refactor/optimize\r\n# where I can; but being honest, I don't think it'll be here.\r\n# Update: I suppose I could write a generic aggregation method, but the problem\r\n# will be $group and $project. I'll keep it this way for now.\r\n\r\ndef find_SMA_50(low, high):\r\n \"\"\"\r\n Will return the number of stocks that exist between two numbers.\r\n\r\n @params: low, high\r\n \r\n @return: Count of stocks found in JSON. Note: It's going to be in JSON\r\n because I want to return errors as well, if the parameters aren't\r\n numbers.\r\n \"\"\"\r\n result = {}\r\n if not (isinstance(low, numbers.Number)):\r\n result = { \"Error\": \"First parameter is not a number!\"}\r\n if not (isinstance(high, numbers.Number)):\r\n result = { \"Error\": \"Second parameter is not a number!\"}\r\n\r\n # Let's just exit out of this if any paramter is not a number.\r\n if \"Error\" in result:\r\n return result\r\n\r\n # Okay, so assuming low and high are both numbers, we should then search\r\n # for all documents with \"50-Day Simple Moving Average\" valued between\r\n # high and low. 
Let's use an aggregation pipeline for this.\r\n pipeline = [\r\n {\r\n \"$project\": {\r\n \"_id\": 0,\r\n \"Ticker\": 1,\r\n \"50-Day Simple Moving Average\": 1\r\n }\r\n },\r\n {\r\n \"$match\": {\r\n \"50-Day Simple Moving Average\": {\r\n \"$gt\": low,\r\n \"$lt\": high\r\n }\r\n }\r\n },\r\n {\r\n \"$group\": {\r\n \"_id\": \"null\",\r\n \"count\": { \"$sum\": 1 }\r\n }\r\n }\r\n ]\r\n\r\n try:\r\n search = collection.aggregate(pipeline)\r\n if search.alive:\r\n result = { \"Count\": next(search)[\"count\"] }\r\n else:\r\n result = { \"Error\": \"No documents found.\" }\r\n\r\n except Exception as e:\r\n result = { \"Error\": (\"%s\" % e) }\r\n\r\n return result\r\n\r\ndef find_industry(industry):\r\n \"\"\"\r\n Will return a list of ticker symbols within an industry.\r\n \r\n @params: industry\r\n \r\n @return: Array of all tickers in the industry.\r\n \"\"\"\r\n result = {}\r\n # Let's first make sure that we're getting a string:\r\n if not (isinstance(industry, str)):\r\n result = { \"Error\": \"Parameter requires a string!\" }\r\n # If it's not, lets just leave the function.\r\n return result\r\n\r\n # Now that we know we have a string, let's put together a list of tickers\r\n # that reside within the industry. We'll use the aggregation framework\r\n # again.\r\n pipeline = [\r\n {\r\n \"$project\": {\r\n \"_id\": 0,\r\n \"Ticker\": 1,\r\n \"Industry\": 1\r\n }\r\n },\r\n {\r\n \"$match\": {\r\n \"Industry\": industry\r\n }\r\n },\r\n {\r\n \"$group\": {\r\n \"_id\": \"null\",\r\n \"Tickers\": { \"$push\": \"$Ticker\" }\r\n }\r\n }\r\n ]\r\n\r\n try:\r\n result = collection.aggregate(pipeline)\r\n if not result.alive:\r\n result = iter([{ \"Error\": \"No documents found.\" }])\r\n\r\n except Exception as e:\r\n result = iter([{ \"Error\": (\"%s\" % e) }])\r\n \r\n return next(result)\r\n \r\ndef find_outstanding_shares_by_sector(sector):\r\n \"\"\"\r\n Find the outstanding shares for a sector grouped by industry.\r\n \r\n @params: sector\r\n \r\n @return: List with JSON objects\r\n \"\"\"\r\n result = {}\r\n\r\n if not (isinstance(sector, str)):\r\n result = { \"Error\": \"Parameter requires a string!\" }\r\n # If it's not, lets just leave the function.\r\n return result\r\n\r\n pipeline = [\r\n {\r\n \"$project\": {\r\n \"_id\": 0,\r\n \"Sector\": 1,\r\n \"Industry\": 1,\r\n \"Shares Outstanding\": 1\r\n }\r\n },\r\n {\r\n \"$match\": {\r\n \"Sector\": sector\r\n }\r\n },\r\n {\r\n \"$group\": {\r\n \"_id\": \"$Industry\",\r\n \"Total Outstanding Shares\": { \"$sum\": \"$Shares Outstanding\" }\r\n }\r\n }\r\n ]\r\n\r\n try:\r\n search = collection.aggregate(pipeline)\r\n\r\n # I'm a little conflicted; I could just return the entire cursor or an\r\n # error if the cursor isn't alive (i.e. if !search.alive), but maybe a\r\n # single list will be better. Or maybe it would be better if I built a\r\n # dictionary using industry as the key and shares as the value. Idk. \r\n # I'll do it as a list for now.\r\n\r\n docs = []\r\n\r\n if not search.alive:\r\n result = { \"Error\": \"No industries found!\" }\r\n else:\r\n for doc in search:\r\n docs.append(doc)\r\n\r\n result = { \"shares\": docs }\r\n\r\n except Exception as e:\r\n result = { \"Error: \": e }\r\n\r\n return result\r\n\r\ndef main():\r\n if DEBUG:\r\n print(\"---- WARNING!: IN DEBUG MODE ----\")\r\n print(\"-- COLLECTION WILL BE DROPPED! --\\n\")\r\n # collection.drop()\r\n\r\n print(\" DB: %s\" % dbName)\r\n print(\"Collection: %s\\n\" % colName)\r\n\r\nmain()","sub_path":"backend/stocks/api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":10231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"95899912","text":"from fgsm_attack import Attacker\n\nimport sys\nimport cv2\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport torch\nimport torchvision.transforms as transforms\nfrom dataset import Adverdataset\n\nif __name__ == '__main__':\n input_dirname = sys.argv[1] #'./submission/images'\n output_dirname = sys.argv[2] #'~/Download/data'\n\n #read imgs\n img_size = 128\n image_fns = sorted(os.listdir(input_dirname))\n print(image_fns[:20])\n print(\"data len\",len(image_fns))\n\n window_sizes = [3,5,7]\n sigmas = [11,21,31]\n fig, axs = plt.subplots(3, 4, figsize=(40, 6))\n for idx,window_size in enumerate(window_sizes):\n os.makedirs(os.path.join(output_dirname,\"avg\"+str(window_size)))\n os.makedirs(os.path.join(output_dirname,\"gau\"+str(window_size)))\n os.makedirs(os.path.join(output_dirname,\"med\"+str(window_size)))\n os.makedirs(os.path.join(output_dirname,\"bil\"+str(window_size)))\n for i, file in enumerate(image_fns):\n img = cv2.imread(os.path.join(input_dirname, file)) # read as BGR\n #avg blur\n img_avg = cv2.blur(img,(window_size,window_size))\n cv2.imwrite(os.path.join(output_dirname,\"avg\"+str(window_size),file),img_avg)\n\n img_gau = cv2.GaussianBlur(img,(window_size,window_size),0)\n cv2.imwrite(os.path.join(output_dirname,\"gau\"+str(window_size),file),img_gau)\n\n img_med = cv2.medianBlur(img,window_size)\n cv2.imwrite(os.path.join(output_dirname,\"med\"+str(window_size),file),img_med)\n\n img_bil = cv2.bilateralFilter(img,window_size,sigmas[idx],sigmas[idx])\n cv2.imwrite(os.path.join(output_dirname,\"bil\"+str(window_size),file),img_bil)\n\n if i==0:\n axs[idx][0].imshow(img_avg)\n axs[idx][0].set_title(\"avg\"+str(window_size)+\"x\"+str(window_size))\n axs[idx][1].imshow(img_gau)\n axs[idx][1].set_title(\"gau\"+str(window_size)+\"x\"+str(window_size)+\" 0\")\n axs[idx][2].imshow(img_med)\n axs[idx][2].set_title(\"med\"+str(window_size)+\"x\"+str(window_size))\n axs[idx][3].imshow(img_bil)\n axs[idx][3].set_title(\"bil\"+str(window_size)+\"x\"+str(window_size)+\" \"+str(sigmas[idx]))\n fig.suptitle(\"defense: {}\".format(output_dirname),fontsize=24)\n plt.show()\n","sub_path":"hw6_adversarial_attack_img/defense.py","file_name":"defense.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"305783713","text":"import bilibiliuploader.core as core\nimport bilibiliuploader.util.persist as persist\n\n\nclass BilibiliUploader():\n def __init__(self):\n self.access_token = None\n self.refresh_token = None\n self.sid = None\n self.mid = None\n\n def login(self, username, password):\n self.access_token, self.refresh_token, self.sid, self.mid = persist.load_login_info()\n print(self.access_token)\n if self.access_token == \"\":\n self.access_token, self.refresh_token, self.sid, self.mid = core.login(username, password)\n persist.save_login_info(self.access_token, self.refresh_token, self.sid, self.mid)\n\n def upload(self,\n parts,\n copyright: int,\n title: str,\n tid: int,\n tag: str,\n desc: str,\n dtime: int,\n source: str = '',\n cover: str = '',\n no_reprint: int = 0,\n open_elec: int = 1,\n max_retry: int = 5,\n thread_pool_workers: int = 1):\n return core.upload(self.access_token,\n self.sid,\n self.mid,\n parts,\n copyright,\n title,\n tid,\n tag,\n desc,\n dtime,\n source,\n cover,\n no_reprint,\n open_elec,\n max_retry,\n thread_pool_workers)\n","sub_path":"bilibiliuploader/bilibiliuploader.py","file_name":"bilibiliuploader.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"607860166","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\n# Aufgabe 4\n# abgabe von Moritz Walter und Manar Zaboub\n\ndef load_spam_data():\n\n data = pd.read_csv(\"spambase.data\", header=None).values\n X = data[:, :-1]\n y = data[:, -1]\n\n print(\"Der Datensatz besteht aus %d E-Mails, wovon %d Spam sind und %d nicht\" % (len(y), sum(y == 1), sum(y == 0)))\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=30, stratify=y)\n return X_train, X_test, y_train, y_test\n\ndef separate_data(x, y, digit):\n x_digit = x[y == digit]\n return x_digit\n\ndef aufgabe1(X_train, X_test, y_train, y_test):\n print(\"Aufgabe 1\")\n\n spam_train = separate_data(X_train,y_train,1)\n no_spam_train = separate_data(X_train, y_train, 0)\n\n u = calcFisherDisk(spam_train,no_spam_train) ## erste klasse > 0\n\n # projeziere Daten\n\n testdata = np.dot(X_test, u)\n\n # erstelle konfusionsmatrix\n gefunden = np.array([(0, 0, 1),\n (0, 0, 0),\n (1, 0, 0)])\n for i in range(len(testdata)):\n p = testdata[i]\n aktuellesLabel = int(y_test[i])\n gefundenes_label = -1\n if p > 0.92:\n gefundenes_label = 2 # gehört zu klasse spam\n else:\n gefundenes_label = 1 # gehört zu klasse kein spam\n gefunden[aktuellesLabel + 1, gefundenes_label] = gefunden[aktuellesLabel + 1, gefundenes_label] + 1\n\n print(gefunden)\n fehler = gefunden[1, 2] + gefunden[2, 1]\n richtig = gefunden[1, 1] + gefunden[2, 2]\n print(\"Fehler: \" + str(fehler))\n print(\"Richtig: \" + str(richtig))\n print(\"Fehlerquote: \" + str((fehler / (richtig + fehler)) * 100) + \" %\")\n\n # plot points on line\n spam = np.dot(spam_train, u)\n nspam = np.dot(no_spam_train, u)\n\n fig, axs = plt.subplots(1, 1, figsize=(20, 1) )\n\n axs.scatter(nspam, np.zeros(len(nspam)), c =\"blue\", s=1)\n axs.scatter(0.92,0.5, c=\"black\", s=1)\n axs.scatter(spam, np.ones(len(spam)), c=\"red\", s=1) # plotte höher um überlappung besser zu sehen\n\n plt.show()\n\ndef normalise(x):\n v = x\n z = []\n for i in range(len(v)):\n\n z.append((v[i] / np.linalg.norm(v[i])))\n return np.array(z)\n\ndef regularize_covariance_matrix(cov, alpha_min):\n alpha = alpha_min\n cov_reg = np.eye(len(cov)) * alpha + (1 - alpha) * cov\n while np.linalg.det(cov_reg) == 0.0:\n alpha += 0.01\n cov_reg = np.eye(len(cov)) * alpha + (1 - alpha) * cov\n return cov_reg\n\ndef calcFisherDisk(klasse1, klasse2):\n # vektoren in format: [(1,...),(2,....), .... ,(n,......)]\n # berechne ua und ub\n u1 = np.mean(klasse1, axis=0)\n u2 = np.mean(klasse2, axis=0)\n\n # berechne Kovariansmatrizen\n\n cov1 = np.cov(klasse1.T) # warum 4\n cov2 = np.cov(klasse2.T) # warum 5\n\n # berechne summe\n cov_sum = cov1 + cov2\n\n #damit immer invertierbar\n if np.linalg.det(cov_sum) == 0.0:\n cov_sum = regularize_covariance_matrix(cov_sum,0.001)\n\n # berechne diskriminante u\n\n u = np.dot(np.linalg.inv(cov_sum),np.subtract(u1,u2))\n\n return u\n\n\ndef main():\n X_train, X_test, y_train, y_test = load_spam_data()\n aufgabe1(X_train, X_test, y_train, y_test)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"UE4/fertige loesung/Aufgabe4_1.py","file_name":"Aufgabe4_1.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"28788514","text":"import Tkinter, tkFileDialog, tkMessageBox\nfrom Tkinter import *\nfrom os.path import splitext\nimport xlrd\nimport unicodecsv\n\n\nclass CsvConverter(Frame):\n def __init__(self, parent):\n Frame.__init__(self, parent, background=\"white\")\n self.parent = parent\n # Initialize some variables\n self.sourceFiles = ()\n self.Labeldisplay = StringVar()\n self.initUI()\n\n def initUI(self):\n self.parent.title(\"Excel To CSV Converter\")\n self.startWidgets()\n\n def startWidgets(self):\n # Label TOP\n label = Label(self, height= 2, textvariable=self.Labeldisplay, relief=RAISED )\n self.Labeldisplay.set(\"Please select files you want to convert.\")\n label.pack(side=TOP, fill=BOTH)\n\n # ScrollBar TOP\n self.scrollbar = Scrollbar(self)\n self.scrollbar.pack(side=RIGHT, fill=Y)\n self.scrollbarlist = Tkinter.Listbox(self, height=15, yscrollcommand=\n self.scrollbar.set)\n self.scrollbarlist.pack(side=TOP, fill=BOTH)\n self.scrollbar.config(command=self.scrollbarlist.yview)\n\n # Buttons LOWER LEFT\n self.BtnSelectFile = Tkinter.Button(self, text=\"Select Files\",\n command=self.openFile, height=10, width=20)\n self.BtnSelectFile.pack(side=LEFT)\n\n self.BtnReset = Tkinter.Button(self, state=DISABLED, text=\"Reset Files\",\n command=self.resetFile, height=10, width=20)\n self.BtnReset.pack(side=LEFT)\n\n self.BtnConvert = Tkinter.Button(self, text=\"Convert Files\", state=DISABLED,\n command=self.convertFile, height=10, width=20)\n self.BtnConvert.pack(side=LEFT)\n\n self.pack(fill=BOTH, expand=1)\n self.DialogBox(\"CSV Conversion\", \"Files Will be converted to \\\n their corresponding folders\")\n\n def resetFile(self):\n self.resetSourceFile()\n self.disableButtons()\n self.Labeldisplay.set(\"Reset files Successful\")\n\n def openFile(self):\n self.selectFiles()\n self.enableButtons()\n self.Labeldisplay.set(\"Add files Successful\")\n\n def selectFiles(self):\n ftypes = [('Excel Files', '*.xlsx'), ('Excel Files 2003', '*.xls'), ('All files', '*')]\n filez = tkFileDialog.askopenfilenames(parent=self.parent, filetypes=ftypes, title='Choose a file')\n for line in filez:\n self.scrollbarlist.insert(END, str(line))\n self.sourceFiles = filez\n\n def convertFile(self):\n self.convertCSV()\n self.disableButtons()\n self.resetSourceFile()\n self.DialogBox(\"CSV Conversion\", \"Converted Successfully\")\n\n def resetSourceFile(self):\n self.scrollbarlist.delete(0, END)\n self.sourceFiles = ()\n\n\n def convertCSV(self):\n if not self.sourceFiles:\n self.DialogBox(\"Error\", \"No Files Selected\")\n return\n\n for f in self.sourceFiles:\n if f.endswith('.xlsx'):\n self.scrollbarlist.delete(0, END)\n self.scrollbarlist.insert(END, \"Opening workbook \" + f)\n xl_wb = xlrd.open_workbook(f)\n xl_sh = xl_wb.sheet_by_index(0)\n\n f = splitext(f)[0]\n f += '.csv'\n\n self.scrollbarlist.insert(END, \"Converting to a CSV \" + f)\n\n xl_csv = open(f, 'wb')\n xl_wr = unicodecsv.writer(xl_csv, quoting=unicodecsv.QUOTE_ALL)\n for rownum in xrange(xl_sh.nrows):\n xl_wr.writerow(xl_sh.row_values(rownum))\n\n self.scrollbarlist.insert(END, \"Finished converting CSV\" + f)\n xl_csv.close()\n\n def DialogBox(self, Title, messageString):\n tkMessageBox.showinfo(Title, messageString)\n\n def enableButtons(self):\n self.BtnReset.config(state=\"normal\")\n self.BtnConvert.config(state=\"normal\")\n\n def disableButtons(self):\n self.BtnReset.config(state=\"disabled\")\n self.BtnConvert.config(state=\"disabled\")\n\n\ndef main():\n root = Tk()\n root.geometry(\"550x400+300+200\")\n app = CsvConverter(root)\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"GUIExcelToCSV.py","file_name":"GUIExcelToCSV.py","file_ext":"py","file_size_in_byte":4185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"117516917","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/vohl/Documents/code/shwirl/shwirl/interface/widgets/group_widgets.py\n# Compiled at: 2018-10-01 15:07:58\n# Size of source mod 2**32: 25975 bytes\nfrom vispy.color import get_colormaps\nfrom PyQt5.QtCore import Qt, pyqtSignal\nfrom PyQt5.QtWidgets import QWidget, QLabel, QTextEdit, QGridLayout, QVBoxLayout, QPushButton, QComboBox, QCheckBox, QSlider, QLineEdit, QFileDialog, QGroupBox\nimport numpy as np\nfrom astropy.io import fits\n\nclass GroupWidgets(QWidget):\n    __doc__ = 'Class for the object widget creating the Qt signals\\n '\n    signal_objet_changed = pyqtSignal(name='objectChanged')\n    signal_file_loaded = pyqtSignal(name='fileLoaded')\n    signal_camera_changed = pyqtSignal(name='cameraChanged')\n    signal_fov_changed = pyqtSignal(name='fovChanged')\n    signal_autorotate_changed = pyqtSignal(name='autorotateChanged')\n    signal_scaling_changed = pyqtSignal(name='scalingChanged')\n    signal_threshold_changed = pyqtSignal(name='thresholdChanged')\n    signal_color_scale_changed = pyqtSignal(name='color_scaleChanged')\n    signal_filter_size_changed = pyqtSignal(name='filter_sizeChanged')\n    signal_filter_type_changed = pyqtSignal(name='filter_typeChanged')\n    signal_high_discard_filter_changed = pyqtSignal(name='high_discard_filterChanged')\n    signal_low_discard_filter_changed = pyqtSignal(name='low_discard_filterChanged')\n    signal_density_factor_changed = pyqtSignal(name='density_factorChanged')\n    signal_export_image = pyqtSignal(name='export_image')\n\n    def __init__(self, type, parent=None):\n        \"\"\"Initialise the GroupWidgets class\n\n        Instanciate the widget's components for a given `type` of widget.\n\n        Parameters\n        -----------\n        type : str\n            Type of widget (one of MainWindow.widget_types).\n\n        parent : class\n            Parent class.\n        \"\"\"\n        super(GroupWidgets, self).__init__(parent)\n        self.loaded_cube = None\n        self.widgets_array = []\n        self.widgets_dict = {}\n        self.widgets_group_dict = {}\n        self.widgets_group_array = []\n\n        def serialize_widgets(key, group, array):\n            self.widgets_array.append(array)\n            self.widgets_dict[key] = array\n            try:\n                self.widgets_group_dict[group].append([key, array])\n            except:\n                self.widgets_group_array.append(group)\n                self.widgets_group_dict[group] = []\n                self.widgets_group_dict[group].append([key, array])\n\n        if type == 'load_button':\n            self.load_button = QPushButton('Load Spectral Cube', self)\n            self.load_button.clicked.connect(self.showLoadFitsDialog)\n            array = [self.load_button]\n            serialize_widgets('fits_button', '', array)\n        else:\n            if type == 'view':\n                l_cam = QLabel('camera ')\n                self.camera = ['Turntable', 'Fly', 'Arcball']\n                self.combo_camera = QComboBox(self)\n                self.combo_camera.addItems(self.camera)\n                self.combo_camera.currentIndexChanged.connect(self.update_view)\n                array = [l_cam, self.combo_camera]\n                serialize_widgets('camera', '', array)\n                self.chk_autorotate = QCheckBox('Autorotate')\n                self.chk_autorotate.setChecked(False)\n                self.chk_autorotate.stateChanged.connect(self.update_autorotate)\n                array = [self.chk_autorotate]\n                serialize_widgets('autorotate', '', array)\n                l_fov = QLabel('Field of View ')\n                self.slider_fov = QSlider(Qt.Horizontal, self)\n                self.slider_fov.setMinimum(0)\n                self.slider_fov.setMaximum(160)\n                self.slider_fov.setValue(60)\n                self.slider_fov.setTickInterval(5)\n                self.l_fov_value = QLineEdit(str(self.slider_fov.value()))\n                self.slider_fov.valueChanged.connect(self.update_fov)\n                array = [l_fov, self.slider_fov, self.l_fov_value]\n                serialize_widgets('field_of_view', '', array)\n                l_scalex = QLabel('Scale X ')\n                self.slider_scalex = QSlider(Qt.Horizontal, self)\n                self.slider_scalex.setMinimum(1)\n                self.slider_scalex.setMaximum(20)\n                self.slider_scalex.setValue(1)\n                self.slider_scalex.setTickInterval(1)\n                self.l_scalex_value = QLineEdit(str(self.slider_scalex.value()))\n                self.slider_scalex.valueChanged.connect(self.update_scaling)\n                array = [l_scalex, self.slider_scalex, self.l_scalex_value]\n                serialize_widgets('scale_x', '', array)\n                l_scaley = QLabel('Scale Y ')\n                self.slider_scaley = QSlider(Qt.Horizontal, self)\n                self.slider_scaley.setMinimum(1)\n                self.slider_scaley.setMaximum(20)\n                self.slider_scaley.setValue(1)\n                self.slider_scaley.setTickInterval(1)\n                self.l_scaley_value = QLineEdit(str(self.slider_scaley.value()))\n                self.slider_scaley.valueChanged.connect(self.update_scaling)\n                array = [l_scaley, self.slider_scaley, self.l_scaley_value]\n                serialize_widgets('scale_y', '', array)\n                l_scalez = QLabel('Scale Z ')\n                self.slider_scalez = QSlider(Qt.Horizontal, self)\n                self.slider_scalez.setMinimum(1)\n                self.slider_scalez.setMaximum(20)\n                self.slider_scalez.setValue(1)\n                self.slider_scalez.setTickInterval(1)\n                self.l_scalez_value = QLineEdit(str(self.slider_scalez.value()))\n                self.slider_scalez.valueChanged.connect(self.update_scaling)\n                array = [l_scalez, self.slider_scalez, self.l_scalez_value]\n                serialize_widgets('scale_z', '', array)\n            else:\n                if type == 'rendering_params':\n                    l_tf_method = QLabel('Transfer function ')\n                    self.tf_method = [\n                     'mip', 'lmip', 'avip', 'iso']\n                    self.combo_tf_method = QComboBox(self)\n                    self.combo_tf_method.addItems(self.tf_method)\n                    self.combo_tf_method.currentIndexChanged.connect(self.update_param)\n                    array = [l_tf_method, self.combo_tf_method]\n                    serialize_widgets('tf_method', '', array)\n                    l_density_factor = QLabel('Density regulator ')\n                    self.slider_density_factor = QSlider(Qt.Horizontal, self)\n                    self.slider_density_factor.setMinimum(0)\n                    self.slider_density_factor.setMaximum(10000)\n                    self.slider_density_factor.setValue(0)\n                    self.l_density_factor_value = QLineEdit(str(1))\n                    self.slider_density_factor.valueChanged.connect(self.update_density_factor)\n                    array = [l_density_factor, self.slider_density_factor, self.l_density_factor_value]\n                    serialize_widgets('density_factor', '', array)\n                    for widget in self.widgets_dict['density_factor']:\n                        widget.hide()\n\n                    l_color_method = QLabel('Colour method ')\n                    self.color_method = ['Moment 0', 'Moment 1', 'rgb_cube']\n                    self.combo_color_method = QComboBox(self)\n                    self.combo_color_method.addItems(self.color_method)\n                    self.combo_color_method.currentIndexChanged.connect(self.update_param)\n                    array = [l_color_method, self.combo_color_method]\n                    serialize_widgets('color_method', '', array)\n                    l_threshold = QLabel('Threshold ')\n                    self.slider_threshold = QSlider(Qt.Horizontal, self)\n                    self.slider_threshold.setMinimum(0)\n                    self.slider_threshold.setMaximum(10000)\n                    self.slider_threshold.setValue(10000)\n                    self.l_threshold_value = QLineEdit(str(self.slider_threshold.value()))\n                    self.slider_threshold.valueChanged.connect(self.update_threshold)\n                    array = [l_threshold, self.slider_threshold, self.l_threshold_value]\n                    serialize_widgets('threshold', '', array)\n                    for widget in self.widgets_dict['threshold']:\n                        widget.hide()\n\n                    l_interpolation_method = QLabel('Interpolation method ')\n                    self.interpolation_method = ['linear', 'nearest']\n                    self.combo_interpolation_method = QComboBox(self)\n                    self.combo_interpolation_method.addItems(self.interpolation_method)\n                    self.combo_interpolation_method.currentIndexChanged.connect(self.update_param)\n                    array = [l_interpolation_method, self.combo_interpolation_method]\n                    serialize_widgets('interpolation_method', '', array)\n                    l_cmap = QLabel('Colour map ')\n                    self.cmap = sorted((list(get_colormaps().keys())), key=(str.lower))\n                    self.combo_cmap = QComboBox(self)\n                    self.combo_cmap.addItems(self.cmap)\n                    self.combo_cmap.currentIndexChanged.connect(self.update_param)\n                    array = [l_cmap, self.combo_cmap]\n                    serialize_widgets('cmap', '', array)\n                else:\n                    if type == 'smoothing':\n                        l_filter_size = QLabel('Box size')\n                        self.slider_filter_size = QSlider(Qt.Horizontal, self)\n                        self.slider_filter_size.setMinimum(0)\n                        self.slider_filter_size.setMaximum(10)\n                        self.slider_filter_size.setValue(0)\n                        self.l_filter_size_value = QLineEdit(str(self.slider_filter_size.value() + 1))\n                        self.slider_filter_size.valueChanged.connect(self.update_filter_size)\n                        array = [l_filter_size, self.slider_filter_size, self.l_filter_size_value]\n                        serialize_widgets('filter_size', '', array)\n                        l_gaussian_filter_size = QLabel('Gaussian size')\n                        self.chk_use_gaussian_filter = QCheckBox('Activate')\n                        self.chk_use_gaussian_filter.setChecked(False)\n                        self.chk_use_gaussian_filter.stateChanged.connect(self.update_gaussian_filter_size)\n                        self.gaussian_filter_size = ['5', '9', '13']\n                        self.combo_gaussian_filter_size = QComboBox(self)\n                        self.combo_gaussian_filter_size.addItems(self.gaussian_filter_size)\n                        self.combo_gaussian_filter_size.currentIndexChanged.connect(self.update_gaussian_filter_size)\n                        array = [l_gaussian_filter_size, self.combo_gaussian_filter_size, self.chk_use_gaussian_filter]\n                        serialize_widgets('gaussian_filter_size', '', array)\n                    else:\n                        if type == 'filtering':\n                            l_filter_type = QLabel('Filter type')\n                            self.filter_type = ['Filter out', 'Rescale']\n                            self.combo_filter_type = QComboBox(self)\n                            self.combo_filter_type.addItems(self.filter_type)\n                            self.combo_filter_type.currentIndexChanged.connect(self.update_filter_type)\n                            array = [l_filter_type, self.combo_filter_type]\n                            serialize_widgets('filter_type', '', array)\n                            l_high_discard_filter = QLabel('high filter ')\n                            self.slider_high_discard_filter = QSlider(Qt.Horizontal, self)\n                            self.slider_high_discard_filter.setMinimum(0)\n                            self.slider_high_discard_filter.setMaximum(10000)\n                            self.slider_high_discard_filter.setValue(10000)\n                            self.l_high_discard_filter_value = QLineEdit(str(self.slider_high_discard_filter.value()))\n                            self.slider_high_discard_filter.valueChanged.connect(self.update_high_discard_filter)\n                            array = [l_high_discard_filter, self.slider_high_discard_filter, self.l_high_discard_filter_value]\n                            serialize_widgets('high_discard_filter', '', array)\n                            l_low_discard_filter = QLabel('Low filter ')\n                            self.slider_low_discard_filter = QSlider(Qt.Horizontal, self)\n                            self.slider_low_discard_filter.setMinimum(0)\n                            self.slider_low_discard_filter.setMaximum(10000)\n                            self.slider_low_discard_filter.setValue(0)\n                            self.l_low_discard_filter_value = QLineEdit(str(self.slider_low_discard_filter.value()))\n                            self.slider_low_discard_filter.valueChanged.connect(self.update_low_discard_filter)\n                            array = [l_low_discard_filter, self.slider_low_discard_filter, self.l_low_discard_filter_value]\n                            serialize_widgets('low_discard_filter', '', array)\n                        else:\n                            if type == 'image':\n                                self.export_image_button = QPushButton('Save image to...', self)\n                                self.export_image_button.clicked.connect(self.export_image)\n                                array = [self.export_image_button]\n                                serialize_widgets('export_image', '', array)\n        widgets_i = 0\n        gbox2 = QGridLayout()\n        group_i = 0\n        for group in self.widgets_group_array:\n            groupbox = QGroupBox(group)\n            gbox = QGridLayout()\n            for key, widgets in self.widgets_group_dict[group]:\n                widget_i = 1\n                for widget in widgets:\n                    gbox.addWidget(widget, widgets_i, widget_i)\n                    if type != 'load_button':\n                        widget.setEnabled(False)\n                    widget_i += 1\n\n                widgets_i += 1\n\n            groupbox.setLayout(gbox)\n            gbox2.addWidget(groupbox, group_i, 0)\n            group_i += 1\n\n        vbox = QVBoxLayout()\n        vbox.addItem(gbox2)\n        vbox.addStretch(1.0)\n        self.setLayout(vbox)\n\n    def update_param(self):\n        \"\"\"Update parameter related to a given transfer function.\n        \"\"\"\n        if self.combo_tf_method.currentText() in ('avip', 'translucent2'):\n            for widget in self.widgets_dict['density_factor']:\n                widget.show()\n\n        else:\n            for widget in self.widgets_dict['density_factor']:\n                widget.hide()\n\n        if self.combo_tf_method.currentText() == 'lmip' or self.combo_tf_method.currentText() == 'iso':\n            for widget in self.widgets_dict['threshold']:\n                widget.show()\n\n        else:\n            for widget in self.widgets_dict['threshold']:\n                widget.hide()\n\n        self.signal_objet_changed.emit()\n\n    def showLoadFitsDialog(self):\n        \"\"\"Show the dialog window to load a fits file.\n        \"\"\"\n        filename = QFileDialog.getOpenFileName(self, 'Open file',\n          filter='FITS Images (*.fits, *.FITS)')\n        if filename[0] != '':\n            self.loaded_cube = fits.open(filename[0])\n            try:\n                self.vol_min = self.loaded_cube[0].header['DATAMIN']\n                self.vol_max = self.loaded_cube[0].header['DATAMAX']\n            except:\n                if self.loaded_cube[0].header['NAXIS'] == 3:\n                    self.vol_min = np.nanmin(self.loaded_cube[0].data)\n                    self.vol_max = np.nanmax(self.loaded_cube[0].data)\n                else:\n                    self.vol_min = np.nanmin(self.loaded_cube[0].data[0])\n                    self.vol_max = np.nanmax(self.loaded_cube[0].data[0])\n\n            self.signal_file_loaded.emit()\n\n    def update_discard_filter_text(self, min, max):\n        \"\"\"Update the discard filter text field.\n\n        Parameters\n        ----------\n        min : int, float\n            Minimum value to be used by the filter\n        max : int, float\n            Maximum value to be used by the filter\n        \"\"\"\n        self.vol_min = min\n        self.vol_max = max\n        try:\n            self.l_high_discard_filter_value.setText(self.format_digits(self.vol_max))\n            self.l_low_discard_filter_value.setText(self.format_digits(self.vol_min))\n        except:\n            pass\n\n    def enable_widgets(self):\n        \"\"\"Enable widgets.\n\n        At launch, all widgets except the `load fits` button are disabled. This function enables all widgets.\n        \"\"\"\n        for widgets in self.widgets_array:\n            for widget in widgets:\n                widget.setEnabled(True)\n\n    def format_digits(self, value):\n        \"\"\"Format digits to be printed.\n\n        This function converts a numerical value to string\n        \"\"\"\n        if isinstance(value, int):\n            return str(value)\n        return '{:.4f}'.format(value)\n\n    def update_view(self):\n        \"\"\"Update view.\n\n        Emits the Qt signal informing that the camera has changed.\n        \"\"\"\n        self.signal_camera_changed.emit()\n\n    def update_fov(self):\n        \"\"\"Update view.\n\n        Updates the field of view value text string.\n        Emits the Qt signal informing that the field of view has changed.\n        \"\"\"\n        self.l_fov_value.setText(str(self.slider_fov.value()))\n        self.signal_fov_changed.emit()\n\n    def update_autorotate(self):\n        \"\"\"Update autorotate.\n\n        Emits the Qt signal informing that autorotate has changed.\n        \"\"\"\n        self.signal_autorotate_changed.emit()\n\n    def update_scaling(self):\n        \"\"\"Update scaling.\n\n        Updates the scale (x,y,z) value text strings.\n        Emits the Qt signal informing that scaling has changed.\n        \"\"\"\n        self.l_scalex_value.setText(str(self.slider_scalex.value()))\n        self.l_scaley_value.setText(str(self.slider_scaley.value()))\n        self.l_scalez_value.setText(str(self.slider_scalez.value()))\n        self.signal_scaling_changed.emit()\n\n    def update_threshold(self):\n        \"\"\"Update threshold.\n\n        Computes the scaled value relative to the global min max of the data.\n        Updates the threshold text field with the new scaled_value.\n        Emits the Qt signal informing that threshold has changed.\n        \"\"\"\n        scaled_value = self.scale_value(self.slider_threshold.value(), self.slider_threshold.minimum(), self.slider_threshold.maximum(), self.vol_min, self.vol_max)\n        self.l_threshold_value.setText(str(scaled_value))\n        self.signal_threshold_changed.emit()\n\n    def update_color_scale(self):\n        \"\"\"Update color scale.\n\n        Emits the Qt signal informing that color scale has changed.\n        \"\"\"\n        self.signal_color_scale_changed.emit()\n\n    def update_filter_size(self):\n        \"\"\"Update filter size.\n\n        Updates the filter size text field.\n        Emits the Qt signal informing that filter size has changed.\n        \"\"\"\n        self.l_filter_size_value.setText(str(self.slider_filter_size.value() + self.slider_filter_size.value() + 1))\n        self.signal_filter_size_changed.emit()\n\n    def update_filter_type(self):\n        \"\"\"Update filter type.\n\n        Emits the Qt signal informing that filter type has changed.\n        \"\"\"\n        self.signal_filter_type_changed.emit()\n\n    def update_gaussian_filter_size(self):\n        \"\"\"Update gaussian filter kernel size\n\n        Emits the Qt signal informing that gaussian filter kernel size has changed.\n        \"\"\"\n        self.signal_filter_size_changed.emit()\n\n    def update_high_discard_filter(self):\n        \"\"\"Update the high discard filter value.\n\n        Computes the scaled value relative to the global min max of the data.\n        Updates the high discard filter text field with the new scaled value.\n        Emits the Qt signal informing that high discard filter has changed.\n        \"\"\"\n        self.high_scaled_value = self.scale_value(self.slider_high_discard_filter.value(), self.slider_high_discard_filter.minimum(), self.slider_high_discard_filter.maximum(), self.vol_min, self.vol_max)\n        if isinstance(self.high_scaled_value, int):\n            self.l_high_discard_filter_value.setText(str(self.high_scaled_value))\n        else:\n            self.l_high_discard_filter_value.setText('{:.4f}'.format(self.high_scaled_value))\n        self.signal_high_discard_filter_changed.emit()\n\n    def update_low_discard_filter(self):\n        \"\"\"Update the low discard filter value.\n\n        Computes the scaled value relative to the global min max of the data.\n        Updates the low discard filter text field with the new scaled value.\n        Emits the Qt signal informing that low discard filter has changed.\n        \"\"\"\n        self.low_scaled_value = self.scale_value(self.slider_low_discard_filter.value(), self.slider_low_discard_filter.minimum(), self.slider_low_discard_filter.maximum(), self.vol_min, self.vol_max)\n        if isinstance(self.l_low_discard_filter_value, int):\n            self.l_low_discard_filter_value.setText(str(self.low_scaled_value))\n        else:\n            self.l_low_discard_filter_value.setText('{:.4f}'.format(self.low_scaled_value))\n        self.signal_low_discard_filter_changed.emit()\n\n    def update_density_factor(self):\n        \"\"\"Update the density factor.\n\n        Computes the scaled value relative to the global min max of the data.\n        Updates the density factor text field with the new scaled value.\n        Emits the Qt signal informing that high discard filter has changed.\n        \"\"\"\n        scaled_value = self.format_digits(self.scale_value(self.slider_density_factor.value(), self.slider_density_factor.minimum(), self.slider_density_factor.maximum(), 0.001, 2))\n        self.l_density_factor_value.setText(str(scaled_value))\n        self.signal_density_factor_changed.emit()\n\n    def scale_value(self, old_value, old_min, old_max, new_min, new_max):\n        \"\"\"Scale a value from it's original range to another, arbitrary, range.\n\n        Parameters\n        ----------\n        old_value : int, float\n            Value to be scaled\n        old_min : int, float\n            Minimum of the original range\n        old_max : int, float\n            Maximum of the original range\n        new_min : int, float\n            Minimum of the new range\n        new_max : int, float\n            Maximum of the new range\n\n        Return\n        ------\n        new_value : float\n            Scaled value\n        \"\"\"\n        old_range = old_max - old_min\n        if old_range == 0:\n            new_range = new_min\n        else:\n            new_range = new_max - new_min\n            new_value = (old_value - old_min) * new_range / old_range + new_min\n        return new_value\n\n    def export_image(self):\n        \"\"\"Export image.\n\n        Emits the Qt signal informing that export image has been triggered.\n        \"\"\"\n        self.signal_export_image.emit()","sub_path":"pycfiles/shwirl-0.1.14.tar/group_widgets.cpython-37.py","file_name":"group_widgets.cpython-37.py","file_ext":"py","file_size_in_byte":23852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"587573914","text":"from pythainlp.tag import pos_tag\nfrom pythainlp.tokenize import word_tokenize\n\ndef main():\n    with open('assets/with_out_position_verification/first_cosine_values_ranked.txt', 'r', encoding='utf8') as source:\n        contents = (source.readlines())\n        contents = [{'words':content.strip().split('=')[0].split('|'), 'score':content.strip().split('=')[1]} for content in contents]\n\n\n    tagged_pos = [(pos_tag(content['words'], engine='artagger')) for content in contents]\n    contents = [{'words': tagged_pos[index], 'score': content['score']} for index, content in enumerate(contents)]\n    contents = [str('|'.join(str(x) for x in content['words'])) + '=' + content['score'] for content in contents]\n\n    with open('assets/with_out_position_verification/first_cosine_values_ranked_tagged.txt', 'w', encoding='utf8') as result:\n        result.write('\\n'.join(contents))\nif __name__ == '__main__':\n    main()\n","sub_path":"first/c_pos_tagger.py","file_name":"c_pos_tagger.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"538194033","text":"#!/usr/bin/python\n'''\n  (C) Copyright 2020 Intel Corporation.\n\n  Licensed under the Apache License, Version 2.0 (the \"License\");\n  you may not use this file except in compliance with the License.\n  You may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\n  Unless required by applicable law or agreed to in writing, software\n  distributed under the License is distributed on an \"AS IS\" BASIS,\n  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n  See the License for the specific language governing permissions and\n  limitations under the License.\n\n  GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE\n  The Government's rights to use, modify, reproduce, release, perform, display,\n  or disclose this software are subject to the terms of the Apache License as\n  provided in Contract No. B609815.\n  Any reproduction of computer software, computer software documentation, or\n  portions thereof marked with this legend must also reproduce the markings.\n'''\nfrom data_mover_test_base import DataMoverTestBase\nfrom os.path import join, sep\n\n\nclass CopyProcsTest(DataMoverTestBase):\n    # pylint: disable=too-many-ancestors\n    \"\"\"Test class for Datamover multiple processes.\n\n    Test Class Description:\n        Tests multi-process (rank) copying of the datamover utility.\n        Tests the following cases:\n            Copying with varying numbers of processes (ranks).\n\n    :avocado: recursive\n    \"\"\"\n\n    def setUp(self):\n        \"\"\"Set up each test case.\"\"\"\n        # Start the servers and agents\n        super(CopyProcsTest, self).setUp()\n\n        # Get the parameters\n        self.test_file = self.params.get(\n            \"test_file\", \"/run/ior/*\")\n        self.flags_write = self.params.get(\n            \"flags_write\", \"/run/ior/copy_procs/*\")\n        self.flags_read = self.params.get(\n            \"flags_read\", \"/run/ior/copy_procs/*\")\n\n        # Setup the directory structures\n        self.posix_test_paths.append(join(self.workdir, \"posix_test\") + sep)\n        self.posix_test_paths.append(join(self.workdir, \"posix_test2\") + sep)\n        self.posix_test_file = join(self.posix_test_paths[0], self.test_file)\n        self.posix_test_file2 = join(self.posix_test_paths[1], self.test_file)\n        self.daos_test_file = join(\"/\", self.test_file)\n\n        # Create the directories\n        cmd = \"mkdir -p {}\".format(self.get_posix_test_path_string())\n        self.execute_cmd(cmd)\n\n    def test_copy_procs(self):\n        \"\"\"\n        Test Description:\n            DAOS-5659: Verify multi-process (rank) copying.\n        Use Cases:\n            Create pool.\n            Crate POSIX container1 and container2 in pool.\n            Create a single 100M file in container1 using ior.\n        :avocado: tags=all,daily_regression\n        :avocado: tags=small,hw\n        :avocado: tags=copy_procs,datamover\n        \"\"\"\n        # Create pool and containers\n        pool1 = self.create_pool()\n        container1 = self.create_cont(pool1)\n        container2 = self.create_cont(pool1)\n\n        # Get the varying number of processes\n        procs_list = self.params.get(\n            \"processes\", \"/run/datamover/copy_procs/*\")\n\n        # Create the test files\n        self.set_ior_location_and_run(\"DAOS_UUID\", self.daos_test_file,\n                                      pool1, container1,\n                                      flags=self.flags_write)\n        self.set_ior_location_and_run(\"POSIX\", self.posix_test_file,\n                                      flags=self.flags_write)\n\n        # DAOS -> POSIX\n        # Run with varying number of processes\n        self.set_src_location(\"DAOS_UUID\", \"/\", pool1, container1)\n        self.set_dst_location(\"POSIX\", self.posix_test_paths[1])\n        for num_procs in procs_list:\n            test_desc = \"copy_procs (DAOS->POSIX with {} procs)\".format(\n                num_procs)\n            self.run_datamover(\n                test_desc=test_desc,\n                processes=num_procs)\n        self.set_ior_location_and_run(\"POSIX\", self.posix_test_file2,\n                                      flags=self.flags_read)\n\n        # POSIX -> DAOS\n        # Run with varying number of processes\n        self.set_src_location(\"POSIX\", self.posix_test_paths[0])\n        self.set_dst_location(\"DAOS_UUID\", \"/\", pool1, container2)\n        for num_procs in procs_list:\n            test_desc = \"copy_procs (POSIX->DAOS with {} processes)\".format(\n                num_procs)\n            self.run_datamover(\n                test_desc=test_desc,\n                processes=num_procs)\n        self.set_ior_location_and_run(\"DAOS_UUID\", self.daos_test_file,\n                                      pool1, container2,\n                                      flags=self.flags_read)\n","sub_path":"src/tests/ftest/datamover/copy_procs.py","file_name":"copy_procs.py","file_ext":"py","file_size_in_byte":4762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"258302692","text":"# matching items in 2 lists.\n\ndef card_number(string): # gets the (integer) number of a single card\n    if string[1].isnumeric() == True:\n        # double digit number\n        number = string[0:2]\n    else: #if lst[i][1] == ' ':\n        # single digit number\n        number = string[0]\n    return int(number)\n\n\n\n\nstraight_cards = ['3 H', '4 H', '4 D', '5 H', '6 H', '7 H']\nflush_cards = ['3 H', '5 H', '6 H', '7 H', '13 H', '4 H']\n\nStraightFlush_cards = set(straight_cards).intersection(flush_cards)\nprint(StraightFlush_cards)\n\nStraightFlush_cards = sorted(StraightFlush_cards, key = card_number)\nprint(StraightFlush_cards)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef card_number_list(lst):\n    number_list = []\n    for i in range(len(lst)):\n        if lst[i][1].isnumeric() == True:\n            # double digit number\n            number = int(lst[i][0:2])\n        else: #if lst[i][1] == ' ':\n            # single digit number\n            number = int(lst[i][0])\n        number_list.append(number)\n\n    return number_list\n\nprint(max(card_number_list(flush_cards)))\n\n","sub_path":"src/checking matching items from seperate lists.py","file_name":"checking matching items from seperate lists.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"218035288","text":"# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport webapp2\nimport os\nimport jinja2\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n    extensions=['jinja2.ext.autoescape'],\n    autoescape=True)\n\nclass MainPage(webapp2.RequestHandler):\n    def get(self):\n        mango = {\n            \"name\": \"mangos\",\n            \"amount\": 500,\n            \"type\": \"fruit\",\n            \"lethal_dose\": 600\n        }\n\n        pineapple = {\n            \"name\": \"pineapple\",\n            \"amount\": 25,\n            \"type\": \"fruit\",\n            \"lethal_dose\": 20\n        }\n\n        grape = {\n            \"name\": \"grape\",\n            \"amount\": 10000,\n            \"type\": \"fruit\",\n            \"lethal_dose\": 100\n        }\n\n        all_food = [mango, pineapple, grape]\n\n        values = {\n            \"food\": mango,\n            \"all_food\": all_food\n        }\n        template = JINJA_ENVIRONMENT.get_template(\"templates/index.html\")\n        self.response.headers['Content-Type'] = 'text/html'\n        self.response.write(template.render(values))\n\n\napp = webapp2.WSGIApplication([\n    ('/', MainPage),\n], debug=True)\n","sub_path":"my_fridge_app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"16656915","text":"import numpy as np\n\nimport pandas as pd\n\nfrom datetime import date\n\ndef doTheCalculation(data):\n\n\tdata['dayofyear']=(data['BILLING_DATE']-\n\n \tdata['BILLING_DATE'].apply(lambda x: date(x.year,1,1))\n\n \t.astype('datetime64[ns]')).apply(lambda x: x.days)\n\n\tX = np.array(data[['SALES_OFFICE', 'REGION_CODE', 'MATERIAL', 'MILL_CODE', 'SECTION_CODE', 'MATERIAL_SIZE','MATERIAL_GRADE_CODE', \n 'CUSTOMER_CODE','DIVISION', 'PANDEMIC_IND',\n \n \n \n ]])\n\n\treturn X\n","sub_path":"vsp_sales_day_feature_calculation_jan20.py","file_name":"vsp_sales_day_feature_calculation_jan20.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"52624834","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n###\n# File: multicollinearity.py\n# Project: Course_BiostatsMGH\n# File Created: Thursday, 19th September 2019 12:53:33 pm\n# Author: C.V (vinegoni@yahoo.com)\n# -----\n# Last Modified: Thursday, 19th September 2019 12:53:34 pm\n# Modified By: C.V (vinegoni@yahoo.com)\n# -----\n# Copyright 2019 - C.V\n###\n\n\nimport pandas as pd\nimport os\nfrom pathlib import Path\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\nBASE_DIR = Path(os.getcwd())\nFILE_DIR = BASE_DIR.joinpath('Files\\\\01').joinpath('Practicum 1_dataset.csv')\n\n\ndata = pd.read_csv(FILE_DIR)\nprint(data.head())\nprint(data.info())\nprint(data.describe())\npercentages = [0.25, 0.50, 0.75]\nquantile_data_1 = tuple(data.pcs.dropna() .quantile(percentage)\n                        for percentage in percentages)\nquantile_data_2 = [np.percentile(data.pcs.dropna().to_numpy(), percentage*100)\n                   for percentage in percentages]\npcs_array = data.pcs.to_numpy()\npcs_array = pcs_array[pcs_array == pcs_array]\nquantile_data_3 = [np.percentile(pcs_array, percentage*100)\n                   for percentage in percentages]\n\nmin_1 = data.pcs.dropna().min()\nmax_1 = data.pcs.dropna().max()\n\nmin_2 = pcs_array.min()\nmax_2 = pcs_array.max()\n\nmean_value = np.mean(pcs_array)\n\ninterquartile_range = abs(quantile_data_1[2]-quantile_data_1[1])\n\nprint(20*'*' + '\\n' + '5 numbers stats' + '\\n \\n')\nprint('Mean:\\t' + str(mean_value))\nprint('Median:\\t' + str(quantile_data_1[1]))\nprint('Min:\\t' + str(min_1))\nprint('Max:\\t' + str(max_1))\nprint('First Quartile (25%):\\t' + str(quantile_data_1[0]))\nprint('First Quartile (75%):\\t' + str(quantile_data_1[2]))\n","sub_path":"Biostats_01_a.py","file_name":"Biostats_01_a.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"5450464","text":"from esper.prelude import *\nfrom esper.rekall import *\nfrom .queries import query\n\n@query(\"Hermione in the center (rekall)\")\ndef hermione_in_the_center():\n    from query.models import FaceCharacterActor\n    from rekall.video_interval_collection import VideoIntervalCollection\n    from rekall.parsers import in_array, bbox_payload_parser, merge_dict_parsers, dict_payload_parser\n    from rekall.merge_ops import payload_plus\n    from rekall.payload_predicates import payload_satisfies\n    from rekall.spatial_predicates import scene_graph\n    from rekall.bbox_predicates import height_at_least, left_of, same_value, same_height\n    from esper.rekall import intrvllists_to_result_bbox\n\n    STRIDE=10\n    LIMIT=100\n    MIN_FACE_HEIGHT = 0.12\n    EPSILON = 0.15\n    NAMES = [ 'ron weasley', 'hermione granger', 'harry potter' ]\n\n    # Annotate face rows with start and end frames and the video ID\n    faces_with_character_actor_qs = FaceCharacterActor.objects.annotate(\n        min_frame=F('face__frame__number'),\n        max_frame=F('face__frame__number'),\n        video_id=F('face__frame__video_id'),\n        bbox_x1=F('face__bbox_x1'),\n        bbox_y1=F('face__bbox_y1'),\n        bbox_x2=F('face__bbox_x2'),\n        bbox_y2=F('face__bbox_y2'),\n        character_name=F('characteractor__character__name')\n    ).filter(face__frame__video__name__contains=\"harry potter\")\n\n    faces_with_identity = VideoIntervalCollection.from_django_qs(\n        faces_with_character_actor_qs,\n        with_payload=in_array(merge_dict_parsers([\n            bbox_payload_parser(VideoIntervalCollection.django_accessor),\n            dict_payload_parser(VideoIntervalCollection.django_accessor, { 'character': 'character_name' }),\n        ]))\n    ).coalesce(payload_merge_op=payload_plus)\n\n    harry_ron_hermione_scene_graph = {\n        'nodes': [\n            { 'name': 'face1', 'predicates': [\n                height_at_least(MIN_FACE_HEIGHT),\n                lambda f: f['character'] == NAMES[0] or f['character'] == NAMES[2]\n            ] },\n            { 'name': 'face2', 'predicates': [\n                height_at_least(MIN_FACE_HEIGHT),\n                lambda f: f['character'] == NAMES[1]\n            ] },\n            { 'name': 'face3', 'predicates': [\n                height_at_least(MIN_FACE_HEIGHT),\n                lambda f: f['character'] == NAMES[0] or f['character'] == NAMES[2]\n            ] }\n        ],\n        'edges': [\n            { 'start': 'face1', 'end': 'face2', 'predicates': [\n                lambda f1, f2: f1['x1'] < f2['x1'],\n                same_value('y1', epsilon=EPSILON),\n                same_height(epsilon=EPSILON) \n            ] },\n            { 'start': 'face2', 'end': 'face3', 'predicates': [\n                lambda f1, f2: f1['x1'] < f2['x1'],\n                same_value('y1', epsilon=EPSILON),\n                same_height(epsilon=EPSILON) \n            ] },\n            { 'start': 'face1', 'end': 'face3', 'predicates': [\n                lambda f1, f2: f1['x1'] < f2['x1'],\n                same_value('y1', epsilon=EPSILON),\n                same_height(epsilon=EPSILON) \n            ] }\n        ]\n    }\n\n    harry_ron_hermione = faces_with_identity.filter(payload_satisfies(scene_graph(\n        harry_ron_hermione_scene_graph,\n        exact=True\n    )))\n    \n    return intrvllists_to_result_bbox(harry_ron_hermione, limit=LIMIT, stride=STRIDE)\n","sub_path":"app/esper/queries/hermione_in_the_center.py","file_name":"hermione_in_the_center.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"92773849","text":"from django.contrib.auth.models import User, Group\nfrom django.http import JsonResponse\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import detail_route, list_route\nfrom rest_framework.permissions import IsAdminUser,IsAuthenticated\nfrom rest_framework.response import Response\nfrom .HyperlinkSerializer import *\nfrom .models import *\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n    queryset = User.objects.all().order_by('-date_joined')\n    serializer_class = UserSerializer\n    permission_classes = [IsAdminUser]\n\n    @detail_route(methods=['post'], permission_classes=[IsAuthenticated])\n    def user_changepassword(self, request, pk=None):\n        user = self.get_object()\n        serializer = PasswordSerializer(data=request.data)\n        if serializer.is_valid():\n            user.set_password(serializer.data['password'])\n            user.save()\n            return Response({'status': 'password set'})\n        else:\n            return Response(serializer.errors,\n                            status=status.HTTP_400_BAD_REQUEST)\n\nclass GroupViewSet(viewsets.ModelViewSet):\n    queryset = Group.objects.all()\n    serializer_class = GroupSerializer\n    permission_classes = [IsAdminUser]\n\nclass DictionaryViewSet(viewsets.ModelViewSet):\n    queryset = Dictionary.objects.all()\n    serializer_class = DictionarySerializer\n\nclass CateViewSet(viewsets.ModelViewSet):\n    queryset = Cate.objects.all()\n    serializer_class = CateSerializer\n\n    @list_route()\n    def cates_on_index(self, request):\n        context = {}\n        cateIndex = Cate.objects.filter(on_index=True)\n        if cateIndex:\n            context['cate_index'] = list(cateIndex.values('id','name'))\n            context['list_products'] = {}\n            for cate in cateIndex: \n                products = Product.objects.filter(CATEGORY=cate)\n                serializer = ProductSerializer(products, many=True)\n                # serialize = serializers.serialize('json', products)\n                context['list_products'].update({cate.name: serializer.data})\n\n        return JsonResponse(context)\nclass ProductViewSet(viewsets.ModelViewSet):\n    queryset = Product.objects.all()\n    serializer_class = ProductSerializer\n\nclass ImageViewSet(viewsets.ModelViewSet):\n    queryset = Image.objects.all()\n    serializer_class = ImageSerializer\n\nclass PostViewSet(viewsets.ModelViewSet):\n    queryset = Post.objects.all()\n    serializer_class = PostSerializer\n\nclass DiscountCodeViewSet(viewsets.ModelViewSet):\n    queryset = DiscountCode.objects.all()\n    serializer_class = DiscountCodeSerializer\n    permission_classes = [IsAdminUser]\n\nclass ShippingViewSet(viewsets.ModelViewSet):\n    queryset = Shipping.objects.all()\n    serializer_class = ShippingSerializer\n    permission_classes = [IsAuthenticated]\n\nclass CartViewSet(viewsets.ModelViewSet):\n    queryset = Cart.objects.all()\n    serializer_class = ShippingSerializer\n    permission_classes = [IsAdminUser]\n\n    @list_route(permission_classes=[IsAuthenticated])\n    def info(self,request, pk=None):\n        context = {}\n\n        if pk == None:\n            cart = Cart.objects.get(id = pk, paymentId__isnull=True)\n            context['cart'] = cart\n            context['total'] = 0\n            context['items'] = {}\n            if cart:\n                cartItem = CartItem.objects.filter(CART = cart)\n                if cartItem:\n                    context['items'] = cartItem\n                    for item in cartItem:\n                        context['total'] += item.PRODUCT.price*item.qty\n        else:\n            user = User.objects.get(username = request.POST.get('username'))\n            cart = Cart.objects.filter(USER = user, paymentId__isnull=True)\n            context['total'] = 0\n            context['items'] = 0\n            if cart:\n                cartItems = CartItem.objects.filter(CART = cart)\n                if cartItems:\n                    context['items'] = cartItems.count()\n                    for item in cartItems:\n                        context['total'] += item.PRODUCT.price*item.qty\n        \n        return Response(context)\n    @detail_route(methods=['post'], permission_classes=[IsAuthenticated])\n    def add_discount(self, request):\n        # pdb.set_trace()\n        context = {}\n        context['type'] = 'success'\n        user = User.objects.get(username = request.POST.get('username'))\n        carts = Cart.objects.filter(USER = user, paymentId__isnull=True)\n        #check cart exist\n        if not carts:\n            context['message'] = 'no cart existed!'\n        else:\n            #check cart discount code applied\n            current_cart = carts.first()\n            if current_cart.DISCOUNT is None:\n                discount = DiscountCode.objects.filter(code=request.POST.get('discount_code'), dueDate__gt = datetime.now())\n                #check discount code exist\n                if discount:\n                    current_cart.DISCOUNT = discount.get()\n                    current_cart.save()\n\n                    context['message'] = 'Discount Code applied'\n                else:\n                    context['message'] = 'Discount Code is not existed'\n            else:\n                context['message'] = 'Discount Code have already applied'\n        return Response(context)\n    \n    @detail_route(methods=['post'], permission_classes=[IsAuthenticated])\n    def add_item(self, request):\n        context = {}\n        context['type'] = 'success'\n        user = User.objects.get(username = request.POST.get('username'))\n        product =Product.objects.filter(id = request.POST.get('product_id'))\n\n        if product:\n            if check_inbound(product.get(), 1):\n                AddCartItem(user, product.get(), 1)\n\n                context['type'] = 'success'\n                context['message'] = 'Adding success!'\n            else:\n                context['type'] = 'error'\n                context['message'] = 'Product out of stock!'\n        else:\n            context['type'] = 'error'\n            context['message'] = 'Product isnot existed!'\n        return Response(context)\n    \n    @detail_route(methods=['post'], permission_classes=[IsAuthenticated])\n    def update_cart(self, request):\n        # pdb.set_trace()\n        user = User.objects.get(username = request.POST.get('username'))\n        list_items = json.loads(request.POST.get('list_items'))\n        cart = Cart.objects.filter(USER = user, paymentId__isnull=True)\n        if cart:\n            cartItems = CartItem.objects.filter(CART = cart.get())\n            for item in cartItems:\n                for i in list_items:\n                    if item.id == int(i['item_id']):\n                        if item.qty != int(i['qty']):\n                            item.qty = int(i['qty'])\n                            item.save()\n\n        return Response({'type': 'success', 'message': 'job done'})\n\n    @detail_route(methods=['post'], permission_classes=[IsAuthenticated])\n    def clear(self, request):\n        user = request.user\n        cart = Cart.objects.filter(USER = user, Order_Cart = None).get()\n        if cart:\n            cartItems = CartItem.objects.filter(CART = cart)\n            for item in cartItems:\n                item.delete()\n        return Response({'type': 'success', 'message': 'job is done!'})\n\n    @detail_route(methods=['post'], permission_classes=[IsAuthenticated])\n    def checkout(self, request):\n        checkout_type = request.POST.get('checkout_type')\n        if checkout_type == 'paypal':\n            pass\n        elif checkout_type == 'stripe':\n            pass\n\nclass CartItemViewSet(viewsets.ModelViewSet):\n    queryset = CartItem.objects.all()\n    serializer_class = CartItemSerializer\n    permission_classes = [IsAuthenticated]\n\nclass ReviewViewSet(viewsets.ModelViewSet):\n    queryset = Review.objects.all()\n    serializer_class = ReviewSerializer\n\nclass EventViewSet(viewsets.ModelViewSet):\n    queryset = Event.objects.all()\n    serializer_class = EventSerializer\n\nclass AddressViewSet(viewsets.ModelViewSet):\n    queryset = Address.objects.all()\n    serializer_class = AddressSerializer\n    permission_classes = [IsAuthenticated]\n\n#reuse function\ndef AddCartItem(user, product, qty):\n\tcheck_cart_exist = Cart.objects.filter(paymentId__isnull=True, USER = user)\n\t#check unpaid cart: if not will create new cart, otherwise just add item to existed cart\n\tif check_cart_exist.count() > 0:\n\t\tcheck_item_cart = CartItem.objects.filter(CART = check_cart_exist.get(), PRODUCT = product)\n\t\t#check exist item in existed cart: if not create new item in cart, otherwise just add quantity to the existed item\n\t\tif check_item_cart.count() > 0:\n\t\t\titem = check_item_cart.get()\n\t\t\titem.qty += int(qty)\n\t\t\titem.save()\n\n\t\t\treturn item\n\t\telse:\n\t\t\tnew_cart_item = CartItem.objects.create(\n\t\t\t\tCART = check_cart_exist.get(),\n\t\t\t\tPRODUCT = product,\n\t\t\t\tqty = qty\n\t\t\t)\n\t\t\tnew_cart_item.save()\n\n\t\t\treturn new_cart_item\n\telse:\n\t\tnew_cart = Cart.objects.create(\n\t\t\tUSER = user\n\t\t)\n\t\tnew_cart.save()\n\n\t\tnew_cart_item = CartItem.objects.create(\n\t\t\tCART = new_cart,\n\t\t\tPRODUCT = product,\n\t\t\tqty = qty\n\t\t)\n\t\tnew_cart_item.save()\n\n\t\treturn new_cart_item\n\ndef check_inbound(product, qty):\n\tif product.amount > 0:\n\t\treturn True\n\telse:\n\t\treturn False","sub_path":"shop/ViewsSet.py","file_name":"ViewsSet.py","file_ext":"py","file_size_in_byte":9221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"73610657","text":"class Solution(object):\n    def twoSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        for i in range(len(nums)):\n            if target - nums[i] in nums:\n                index = len(nums) - nums[-1::-1].index(target - nums[i]) - 1\n                if index != i:\n                    return [i, index]\n","sub_path":"1-TwoSum.py","file_name":"1-TwoSum.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"61396156","text":"from meta_policy_search.baselines.linear_baseline import LinearFeatureBaseline\nfrom meta_policy_search.envs.point_envs.point_env_2d_corner import MetaPointEnvCorner\nfrom meta_policy_search.envs.normalized_env import normalize\nfrom meta_policy_search.meta_algos.pro_mp import ProMP\nfrom meta_policy_search.meta_trainer import Trainer\nfrom meta_policy_search.samplers.meta_sampler import MetaSampler\nfrom meta_policy_search.samplers.meta_sample_processor import MetaSampleProcessor\nfrom meta_policy_search.policies.meta_gaussian_mlp_policy import MetaGaussianMLPPolicy\nfrom meta_policy_search.policies.conv import MAMLGaussianMLPPolicy\nfrom meta_policy_search.utils import logger\nfrom meta_policy_search.utils.utils import set_seed, ClassEncoder\n\nimport numpy as np\nimport tensorflow as tf\nimport os\nimport json\nimport argparse\nimport time\n\n# Import for mario\nfrom railrl.torch.metac.gcg.make_env import make_env\n\nmeta_policy_search_path = '/'.join(os.path.realpath(os.path.dirname(__file__)).split('/')[:-1])\n\ndef main(config):\n    set_seed(config['seed'])\n\n    \n    baseline = globals()[config['baseline']]() #instantiate baseline\n    env = make_env(config['env_id'], config)\n    # import pdb; pdb.set_trace()# env = globals()[config['env']]() # instantiate env\n    # env = normalize(env) # apply normalize wrapper to env\n\n    print(\"MARIO obs shape\", env.observation_space.shape)\n    policy = MAMLGaussianMLPPolicy(\n        'conv',\n        obs_dim=int(np.prod(env.observation_space.shape)),\n        action_dim=int(np.prod(env.action_space.shape)),\n        meta_batch_size=config['meta_batch_size'],\n        hidden_sizes=config['hidden_sizes'],\n    )\n\n    sampler = MetaSampler(\n        env=env,\n        policy=policy,\n        rollouts_per_meta_task=config['rollouts_per_meta_task'],  # This batch_size is confusing\n        meta_batch_size=config['meta_batch_size'],\n        max_path_length=config['max_path_length'],\n        parallel=config['parallel'],\n    )\n\n    sample_processor = MetaSampleProcessor(\n        baseline=baseline,\n        discount=config['discount'],\n        gae_lambda=config['gae_lambda'],\n        normalize_adv=config['normalize_adv'],\n    )\n\n    algo = ProMP(\n        policy=policy,\n        inner_lr=config['inner_lr'],\n        meta_batch_size=config['meta_batch_size'],\n        num_inner_grad_steps=config['num_inner_grad_steps'],\n        learning_rate=config['learning_rate'],\n        num_ppo_steps=config['num_promp_steps'],\n        clip_eps=config['clip_eps'],\n        target_inner_step=config['target_inner_step'],\n        init_inner_kl_penalty=config['init_inner_kl_penalty'],\n        adaptive_inner_kl_penalty=config['adaptive_inner_kl_penalty'],\n    )\n\n    trainer = Trainer(\n        algo=algo,\n        policy=policy,\n        env=env,\n        sampler=sampler,\n        sample_processor=sample_processor,\n        n_itr=config['n_itr'],\n        num_inner_grad_steps=config['num_inner_grad_steps'],\n    )\n\n    trainer.train()\n\nif __name__==\"__main__\":\n    idx = int(time.time())\n\n    parser = argparse.ArgumentParser(description='ProMP: Proximal Meta-Policy Search')\n    parser.add_argument('--config_file', type=str, default='', help='json file with run specifications')\n    parser.add_argument('--dump_path', type=str, default=meta_policy_search_path + '/data/pro-mp/run_%d' % idx)\n\n    args = parser.parse_args()\n\n\n    if args.config_file: # load configuration from json file\n        with open(args.config_file, 'r') as f:\n            config = json.load(f)\n\n    else: # use default config\n\n        config = {\n            'seed': 1,\n            'baseline': 'LinearFeatureBaseline',\n            'env_id': 'mariomultilevel',\n\n            # sampler config\n            'rollouts_per_meta_task': 2,\n            'max_path_length': 10,\n            'parallel': True,\n\n            # sample processor config\n            'discount': 0.99,\n            'gae_lambda': 1,\n            'normalize_adv': True,\n\n            # policy config\n            'hidden_sizes': (64, 64),\n            'learn_std': True, # whether to learn the standard deviation of the gaussian policy\n\n            # ProMP config\n            'inner_lr': 0.1, # adaptation step size\n            'learning_rate': 1e-3, # meta-policy gradient step size\n            'num_promp_steps': 5, # number of ProMp steps without re-sampling\n            'clip_eps': 0.3, # clipping range\n            'target_inner_step': 0.01,\n            'init_inner_kl_penalty': 5e-4,\n            'adaptive_inner_kl_penalty': False, # whether to use an adaptive or fixed KL-penalty coefficient\n            'n_itr': 1001, # number of overall training iterations\n            'meta_batch_size': 40, # number of sampled meta-tasks per iterations\n            'num_inner_grad_steps': 1, # number of inner / adaptation gradient steps\n            \n            # Mario config\n            \"env_kwargs\" : {\n                \"screen_size\": 20,\n                \"grayscale_obs\": False,\n                \"frame_skip\": 1,\n                \"lifelong\": False,\n                \"max_lives\": 1,\n                \"scramble_action_freq\": 0,\n                \"frame_stack\": 1,\n                \"action_stack\": 0,\n                \"default_level\": 0,\n                \"shuffle_env_actions\": True,\n                \"shuffle_envs\": False,\n                \"singletask\": True\n            },\n\n            \"algo_kwargs\":{\n                \"batch_size\":8,\n                \"adapt_batch_size\": 64,\n                \"meta_batch_size\":26,\n                \"test_size\": 6,\n                \"mpc_horizon\":5,\n                \"window_len\": 200,\n                \"min_num_steps_before_training\": 1000,\n                \"min_num_steps_before_adapting\": 7,\n                \"num_expl_steps_per_train_loop\": 100,\n                \"max_path_length\":1000,\n                \"eval_freq\": 10,\n                \"outer_update_steps\":20,\n                \"inner_update_steps\":4,\n                \"adapt_freq\": 1,\n                \"num_adapt_steps\": 5,\n                \"num_epochs\":10000,\n                \"inner_lr\":1e-3,\n                \"inner_opt_name\": \"SGD\",\n                \"adapt_opt_name\": \"SGD\",\n                \"adapt_inner_lr\": 1e-3,\n                \"debug\":False,\n                \"use_consecutive_batch\": False,\n                \"reset_meta_model\": True,\n                \"adapt_same_batch\": False,\n                \"train_same_batch\": True,\n                \"shuffle_actions\": False,\n                \"explore_if_stuck\": False,\n                \"shuffle_env_actions\": False,\n                \"adapt_from_replay\": False,\n                \"test_buffer_size\": 550,\n                \"save_buffer\": True\n            },\n\n            \"trainer_kwargs\":{\n                \"learning_rate\":1e-4,\n                \"discount\":0.99,\n                \"data_type\": \"uint8\",\n                \"opt_name\": \"Adam\",\n                \"optimizer_kwargs\": {\n                    \"weight_decay\": 0\n                },\n                \"bayesian\": False\n            },\n\n            \"controller_kwargs\": {\n                \"num_simulated_paths\":500,\n                \"cem_steps\":3\n            },\n\n            \"reward_predictor_kwargs\":{\n                \"reward_type\":\"categorical\",\n                \"num_bins\":41\n            },\n            \"replay_buffer_kwargs\":{\n                \"max_replay_buffer_size\":20000\n            },\n            \"adaptive_replay_buffer_kwargs\":{\n                \"max_replay_buffer_size\":10\n            },\n            \"extra_args\": {\n                \"prior_sigma_1\": 0.001,\n                \"prior_pi\": 1.0,\n                \"posterior_rho_init\": -6\n            },\n            \"model_kwargs\": {\n            \t\"data_type\": \"uint8\",\n                \"reward_scale\": 10.0,\n                \"bayesian\": False,\n                \"conv_norm_type\": \"layer\"\n            },\n            \"log_comet\": True,\n            \"debug\": False,\n            \"use_gpu\": True,\n        }\n\n    # configure logger\n    logger.configure(dir=args.dump_path, format_strs=['stdout', 'log', 'csv'],\n                     snapshot_mode='last_gap')\n\n    # dump run configuration before starting training\n    json.dump(config, open(args.dump_path + '/params.json', 'w'), cls=ClassEncoder)\n\n    # start the actual algorithm\n    main(config)\n","sub_path":"run_scripts/pro-mp_run_mario.py","file_name":"pro-mp_run_mario.py","file_ext":"py","file_size_in_byte":8191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"346932004","text":"from nltk.corpus import brown\nimport re\nfrom gensim import models, corpora\nfrom nltk import word_tokenize\nfrom nltk.corpus import stopwords\n\ndata = []\npattern = r\"[a-zA-Z\\-]{3,}\"\n\nfor file in brown.fileids():\n    document = ' '.join(brown.words(file))\n    data.append(document)\n\n# print('\\n'.join(data[0:5]))\nn = len(data)  # there are 500 rows in brown corpus\nn_topics = 10\nstop_word = stopwords.words('english')\n\n\ndef clean_text(data):\n    tokenized = word_tokenize(data.lower())\n    cleaned = [t for t in tokenized if t not in stop_word and\n               re.match(pattern, t)]  # take words have 3 characters at least\n    return cleaned\n\n\ntokenized_data = [clean_text(row) for row in data]\n# print(tokenized_data[:5])\n\ndict_ = corpora.Dictionary(tokenized_data)\ncorpus = [dict_.doc2bow(row) for row in tokenized_data]  # (word_id, frequency) for each word\n# print(corpus[:5])\n\nprint('-'*10, \"LDA Model\", '-'*10)\nlda = models.LdaModel(corpus=corpus, num_topics=n_topics, id2word=dict_)\nfor i in range(n_topics):\n    print(\"Topic-{}, words:{}\".format(i, lda.print_topic(i, 5)))\n\nprint('*'*95)\nprint('-'*10, \"LSI Model\", '-'*10)\nlsi = models.LsiModel(corpus=corpus, num_topics=n_topics, id2word=dict_)\nfor i in range(n_topics):\n    print(\"Topic-{}, words:{}\".format(i, lsi.print_topic(i, 5)))\n","sub_path":"lda-lsi-gensim.py","file_name":"lda-lsi-gensim.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"408629986","text":"from django import template\n\nregister = template.Library()\n\n\n@register.filter\ndef color_value(value, idx):\n    color = 'white'\n    try:\n        value = float(value)\n    except ValueError:\n        return color\n    if idx > 1:\n        if idx == 14:\n            color = 'grey'\n        elif value < 0:\n            color = 'green'\n        elif value > 5:\n            color = 'red'\n        elif 2 < value <= 5:\n            color = '#f98c8c'\n        elif 1 < value <= 2:\n            color = '#fcd3d3'\n    return color\n","sub_path":"dynamic-templates/task1/app/templatetags/color_filters.py","file_name":"color_filters.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"410638942","text":"import asyncio\nfrom pathlib import Path\nimport random\nfrom typing import Tuple, List\n\nimport httpx\nfrom PIL.Image import Image as PILImage\nimport skimage.measure\nimport torch\nfrom torch.utils.data.dataset import Dataset\nfrom torchvision.transforms import Normalize\nfrom torchvision.transforms import ToTensor\nfrom torchvision.transforms import Compose\nfrom torchvision.transforms import Resize\nfrom torchvision.transforms import RandomHorizontalFlip\nfrom 
torchvision.transforms import RandomVerticalFlip\n\nfrom geobacter.inference.geotypes import Extent\nfrom geobacter.inference.mapnik import get_extent\nfrom geobacter.inference.datasets.sample import TripletSample\n\nAUGMENTATIONS = Compose([\n RandomHorizontalFlip(),\n RandomVerticalFlip()\n])\nBASE_TRANSFORMS = Compose([\n Resize((128, 128)),\n ToTensor(),\n Normalize(\n [0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225]\n ),\n])\n# Reverse the mean std normalization (useful for visualising\n# images in Tensorboard.\nDENORMALIZE = Normalize(\n [-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225],\n [1 / 0.229, 1 / 0.224, 1 / 0.225]\n)\n\n\nclass OsmTileDataset(Dataset):\n def __init__(\n self,\n samples: List[TripletSample],\n cache_dir: Path\n ):\n self.samples = samples\n self.negative_extents = [\n sample.anchor.extent for sample in self.samples\n ] + [\n sample.positive.extent for sample in self.samples\n ]\n self.cache_dir = cache_dir\n self.client = httpx.AsyncClient()\n self.index_to_entropy = {}\n print(f\"OsmTileDataset initialised. samples={len(self.samples)}\")\n\n def __len__(self):\n return len(self.samples)\n\n def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n anchor, positive, negative = self.load_triplet_images(index)\n\n anchor = BASE_TRANSFORMS(\n AUGMENTATIONS(\n anchor\n )\n )\n positive = BASE_TRANSFORMS(\n AUGMENTATIONS(\n positive\n )\n )\n negative = BASE_TRANSFORMS(\n AUGMENTATIONS(\n negative\n )\n )\n\n return anchor, positive, negative\n\n def anchor_entropy(self, index: int) -> float:\n async def _anchor_entropy():\n if index not in self.index_to_entropy:\n self.index_to_entropy[index] = await extent_to_entropy(\n self.samples[index].anchor.extent,\n self.cache_dir,\n self.client\n )\n return self.index_to_entropy[index]\n\n loop = asyncio.get_event_loop()\n task = loop.create_task(_anchor_entropy())\n return loop.run_until_complete(task)\n\n def sample(self, index: int) -> TripletSample:\n return self.samples[index]\n\n def load_triplet_images(self, index: int) -> Tuple[PILImage, PILImage, PILImage]:\n async def _load_triplet():\n a = await get_extent(\n self.samples[index].anchor.extent,\n cache_dir=self.cache_dir, zoom=16, client=self.client\n )\n self.index_to_entropy[index] = skimage.measure.shannon_entropy(a.convert('LA'))\n p = await get_extent(\n self.samples[index].positive.extent,\n cache_dir=self.cache_dir, zoom=16, client=self.client\n )\n n = await get_extent(\n random.choice(self.negative_extents),\n cache_dir=self.cache_dir, zoom=16, client=self.client\n )\n return a, p, n\n\n loop = asyncio.get_event_loop()\n task = loop.create_task(_load_triplet())\n return loop.run_until_complete(task)\n\n\nasync def extent_to_entropy(extent: Extent, cache_dir: Path, client: httpx.AsyncClient) -> float:\n image = await get_extent(\n extent,\n cache_dir=cache_dir, zoom=16, client=client\n )\n return skimage.measure.shannon_entropy(image.convert('LA'))\n","sub_path":"geobacter/inference/datasets/osm.py","file_name":"osm.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"85173355","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('products', '0006_auto_20160820_1417'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='product',\n name='product_slider',\n field=models.CharField(default=b'a', 
max_length=10, blank=True, choices=[(b'a', b'\\xd0\\x9d\\xd0\\x95\\xd0\\xa2'), (b'b', b'TOP'), (b'c', b'\\xd0\\x90\\xd0\\xba\\xd1\\x86\\xd0\\xb8\\xd1\\x8f'), (b'd', b'\\xd0\\xa1\\xd0\\xba\\xd0\\xb8\\xd0\\xb4\\xd0\\xba\\xd0\\xb0'), (b'e', b'\\xd0\\xa0\\xd0\\xb0\\xd1\\x81\\xd0\\xbf\\xd1\\x80\\xd0\\xbe\\xd0\\xb4\\xd0\\xb0\\xd0\\xb6\\xd0\\xb0')]),\n ),\n ]\n","sub_path":"apps/products/migrations/0007_auto_20160820_1434.py","file_name":"0007_auto_20160820_1434.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"563144123","text":"\"\"\"\nMIDI messages\n\nThere is no need to use this module directly. All you need is\navailable in the top level module.\n\"\"\"\nimport sys\n\nPY2 = (sys.version_info.major == 2)\n\n# Pitchwheel is a 14 bit signed integer\nMIN_PITCHWHEEL = -8192\nMAX_PITCHWHEEL = 8191\n\n# Song pos is a 14 bit unsigned integer\nMIN_SONGPOS = 0\nMAX_SONGPOS = 16383\n\nclass MessageSpec(object):\n \"\"\"\n Specifications for creating a message.\n \n status_byte is the first byte of the message. For channel\n messages, the channel (lower 4 bits) is clear.\n\n type is the type name of the message, for example 'sysex'.\n\n arguments is the attributes / keywords arguments specific to\n this message type.\n\n length is the length of this message in bytes. This value is not used\n for sysex messages, since they use an end byte instead.\n\n Table of MIDI messages:\n\n http://www.midi.org/techspecs/midimessages.php\n \"\"\"\n\n def __init__(self, status_byte, type_, arguments, length):\n \"\"\"Create a new message specification.\"\"\"\n self.status_byte = status_byte\n self.type = type_\n self.arguments = arguments\n self.length = length\n \n # Attributes that can be set on the object\n self.valid_attributes = set(self.arguments) | {'time'}\n\n def signature(self):\n \"\"\"Return call signature for Message constructor for this type.\n\n The signature is returned as a string.\n \"\"\"\n parts = []\n parts.append(repr(self.type))\n\n for name in self.arguments:\n if name == 'data':\n parts.append('data=()')\n else:\n parts.append('{}=0'.format(name))\n parts.append('time=0')\n\n sig = '({})'.format(', '.join(parts))\n\n return sig\n\n\ndef get_message_specs():\n return [\n # Channel messages\n MessageSpec(0x80, 'note_off', ('channel', 'note', 'velocity'), 3),\n MessageSpec(0x90, 'note_on', ('channel', 'note', 'velocity'), 3),\n MessageSpec(0xa0, 'polytouch', ('channel', 'note', 'value'), 3),\n MessageSpec(0xb0, 'control_change',\n ('channel', 'control', 'value'), 3),\n MessageSpec(0xc0, 'program_change', ('channel', 'program',), 2),\n MessageSpec(0xd0, 'aftertouch', ('channel', 'value',), 2),\n MessageSpec(0xe0, 'pitchwheel', ('channel', 'pitch',), 3),\n\n # System common messages\n MessageSpec(0xf0, 'sysex', ('data',), float('inf')),\n MessageSpec(0xf1, 'quarter_frame', ('frame_type', 'frame_value'), 2),\n MessageSpec(0xf2, 'songpos', ('pos',), 3),\n MessageSpec(0xf3, 'song_select', ('song',), 2),\n # 0xf4 is undefined.\n # 0xf5 is undefined.\n MessageSpec(0xf6, 'tune_request', (), 1),\n # 0xf7 is the stop byte for sysex messages, so should not be a message.\n\n # System real time messages\n MessageSpec(0xf8, 'clock', (), 1),\n # 0xf9 is undefined.\n MessageSpec(0xfa, 'start', (), 1),\n MessageSpec(0xfb, 'continue', (), 1),\n MessageSpec(0xfc, 'stop', (), 1),\n # 0xfd is undefined.\n MessageSpec(0xfe, 'active_sensing', (), 1),\n MessageSpec(0xff, 'reset', (), 1),\n ]\n\n\ndef 
build_spec_lookup(message_specs):\n    lookup = {}\n\n    for spec in message_specs:\n        status_byte = spec.status_byte\n\n        if status_byte < 0xf0:\n            # Channel message.\n            # The upper 4 bits are message type, and\n            # the lower 4 are MIDI channel.\n            # We need lookup for all 16 MIDI channels.\n            for channel in range(16):\n                lookup[status_byte | channel] = spec\n        else:\n            lookup[status_byte] = spec\n\n        lookup[spec.type] = spec\n\n    return lookup\n\n\ndef get_spec(type_or_status_byte):\n    \"\"\"Get message specification from status byte or message type name.\n\n    For use in writing parsers.\n    \"\"\"\n    try:\n        return Message._spec_lookup[type_or_status_byte]\n    except KeyError:\n        raise LookupError('unknown type or status byte')\n\n\ndef check_time(time):\n    \"\"\"Check type and value of time.\n    \n    Raises TypeError if value is not an integer or a float\n    \"\"\"\n    if PY2 and isinstance(time, long):\n        return\n\n    if not (isinstance(time, int) or isinstance(time, float)):\n        raise TypeError('time must be an integer or float')\n\n\ndef check_channel(channel):\n    \"\"\"Check type and value of channel.\n\n    Raises TypeError if the value is not an integer, and ValueError if\n    it is outside range 0..15.\n    \"\"\"\n    if not isinstance(channel, int):\n        raise TypeError('channel must be an integer')\n    elif not 0 <= channel <= 15:\n        raise ValueError('channel must be in range 0..15')\n\n\ndef check_pos(pos):\n    \"\"\"Check type and value of song position.\n\n    Raise TypeError if the value is not an integer, and ValueError if\n    it is outside range MIN_SONGPOS..MAX_SONGPOS.\n    \"\"\"\n    if not isinstance(pos, int):\n        raise TypeError('song pos must be an integer')\n    elif not MIN_SONGPOS <= pos <= MAX_SONGPOS:\n        raise ValueError('song pos must be in range {}..{}'.format(\n            MIN_SONGPOS, MAX_SONGPOS))\n\n\ndef check_pitch(pitch):\n    \"\"\"Raise TypeError if the value is not an integer, and ValueError\n    if it is outside range MIN_PITCHWHEEL..MAX_PITCHWHEEL.\n    \"\"\"\n    if not isinstance(pitch, int):\n        raise TypeError('pitchwheel value must be an integer')\n    elif not MIN_PITCHWHEEL <= pitch <= MAX_PITCHWHEEL:\n        raise ValueError('pitchwheel value must be in range {}..{}'.format(\n            MIN_PITCHWHEEL, MAX_PITCHWHEEL))\n\n\ndef check_data(data_bytes):\n    \"\"\"Check type of data_byte and type and range of each data byte.\n\n    Returns the data bytes as a tuple of integers.\n\n    Raises TypeError if value is not iterable.\n    Raises TypeError if one of the bytes is not an integer.\n    Raises ValueError if one of the bytes is out of range 0..127.\n    \"\"\"\n    # Make the sequence immutable.\n    data_bytes = tuple(data_bytes)\n\n    for byte in data_bytes:\n        check_databyte(byte)\n\n    return data_bytes\n\n\ndef check_databyte(value):\n    \"\"\"Raise an exception if the byte has the wrong type or is out of range.\n\n    Raises TypeError if the byte is not an integer, and ValueError if\n    it is out of range. Data bytes are 7 bit, so the valid range is\n    0..127.\n    \"\"\"\n    if not isinstance(value, int):\n        raise TypeError('data byte must be an integer')\n    elif not 0 <= value <= 127:\n        raise ValueError('data byte must be in range 0..127')\n\n\ndef encode_channel(channel):\n    \"\"\"Convert channel into a list of bytes. Return an empty list of\n    bytes, since channel is already masked into status byte.\n    \"\"\"\n    return []\n\n\ndef encode_data(data):\n    \"\"\"Encode sysex data as a list of bytes. 
A sysex end byte (0xf7)\n    is appended.\n    \"\"\"\n    return list(data) + [0xf7]\n\n    \ndef encode_pitch(pitch):\n    \"\"\"Encode pitchwheel pitch as a list of bytes.\"\"\"\n    pitch -= MIN_PITCHWHEEL\n    return [pitch & 0x7f, pitch >> 7]\n\n\ndef encode_pos(pos):\n    \"\"\"Encode song position as a list of bytes.\"\"\"\n    return [pos & 0x7f, pos >> 7]\n\n\nclass BaseMessage(object):\n    \"\"\"Base class for MIDI messages.\n\n    Can be subclassed to create meta messages, for example.\n    \"\"\"\n    pass\n\nclass Message(BaseMessage):\n    \"\"\"\n    MIDI message class.\n    \"\"\"\n\n    # Quick lookup of specs by name or status_byte.\n    _spec_lookup = build_spec_lookup(get_message_specs())\n\n    def __init__(self, type_, **parameters):\n        \"\"\"Create a new message.\n\n        The first argument is typically the type of message to create,\n        for example 'note_on'.\n\n        It can also be the status_byte, that is the first byte of the\n        message. For channel messages, the channel (lower 4 bits of\n        the status_byte) is masked out from the lower 4 bits of the\n        status byte. This can be overridden by passing the 'channel'\n        keyword argument.\n        \"\"\"\n        try:\n            spec = self._spec_lookup[type_]\n        except KeyError:\n            text = '{!r} is an invalid type name or status byte'\n            raise ValueError(text.format(type_))\n\n        self._set('_spec', spec)\n        self._set('type', self._spec.type)\n\n        self._set_attributes_to_default_values(type_)\n        self._override_attributes(parameters)\n\n    def _set_attributes_to_default_values(self, type_):\n        for name in self._spec.arguments:\n            if name == 'velocity':\n                self._set('velocity', 0x40)\n            elif name == 'channel':\n                # This is a channel message, so if the first\n                # argument to this function was a status_byte,\n                # the lower 4 bits will contain the channel.\n                if isinstance(type_, int):\n                    self._set('channel', type_ & 0x0f)\n                else:\n                    self._set('channel', 0)\n            elif name == 'data':\n                self._set('data', ())\n            else:\n                self._set(name, 0)\n        self._set('time', 0)\n\n    def _override_attributes(self, parameters):\n        for name, value in parameters.items():\n            try:\n                setattr(self, name, value)\n            except AttributeError:\n                raise ValueError('{!r} is an invalid'\n                                 ' keyword argument for this message type'\n                                 ''.format(name))\n\n    def copy(self, **overrides):\n        \"\"\"Return a copy of the message.\n\n        Attributes will be overridden by the passed keyword arguments.\n        Only message specific attributes can be overridden. 
The message\n        type can not be changed.\n\n        Example:\n\n            a = Message('note_on')\n            b = a.copy(velocity=32)\n        \"\"\"\n        # Get values from this object\n        arguments = {}\n        for name in self._spec.valid_attributes:\n            if name in overrides:\n                arguments[name] = overrides[name]\n            else:\n                arguments[name] = getattr(self, name)\n\n        for name in overrides:\n            if name not in self._spec.valid_attributes:\n                text = '{!r} is an invalid argument for this message type'\n                raise ValueError(text.format(name))\n\n        return self.__class__(self.type, **arguments)\n\n    def _set(self, name, value):\n        \"\"\"Sets an attribute directly, bypassing all type and value checks\"\"\"\n        self.__dict__[name] = value\n\n    def __setattr__(self, name, value):\n        \"\"\"Set an attribute.\"\"\"\n\n        if name in self._spec.valid_attributes:\n            try:\n                check = globals()['check_{}'.format(name)]\n            except KeyError:\n                check = check_databyte\n\n            ret = check(value)\n            if name == 'data':\n                value = ret\n\n            self.__dict__[name] = value\n        elif name in self.__dict__:\n            raise AttributeError('{} attribute is read only'.format(name))\n        else:\n            raise AttributeError('{} message has no attribute {}'.format(\n                self.type, name))\n\n    def __delattr__(self, name):\n        raise AttributeError('attribute can not be deleted')\n\n    def bytes(self):\n        \"\"\"Encode message and return as a list of integers.\"\"\"\n\n        status_byte = self._spec.status_byte\n        if status_byte < 0xf0:\n            # Add channel (lower 4 bits) to status byte.\n            # Those bits in spec.status_byte are always 0.\n            status_byte |= self.channel\n\n        message_bytes = [status_byte]\n\n        for name in self._spec.arguments:\n            value = getattr(self, name)\n            try:\n                encode = globals()['encode_{}'.format(name)]\n                message_bytes.extend(encode(value))\n            except KeyError:\n                message_bytes.append(value)\n\n        return message_bytes\n\n    def bin(self):\n        \"\"\"Encode message and return as a bytearray.\n\n        This can be used to write the message to a file.\n        \"\"\"\n        return bytearray(self.bytes())\n\n    def hex(self, sep=' '):\n        \"\"\"Encode message and return as a string of hex numbers,\n\n        Each number is separated by the string sep.\n        \"\"\"\n        return sep.join(['{:02X}'.format(byte) for byte in self.bytes()])\n\n    def __repr__(self):\n        parts = []\n\n        for name in self._spec.arguments + ('time',):\n            parts.append('{}={!r}'.format(name, getattr(self, name)))\n\n        return '<message {} {}>'.format(self.type, ', '.join(parts))\n\n    def __str__(self):\n        return _format_as_string(self)\n\n    def __eq__(self, other):\n        \"\"\"Compare message to another for equality.\n        \n        Key for comparison: (msg.type, msg.channel, msg.note, msg.velocity).\n        \"\"\"\n        if not isinstance(other, Message):\n            raise TypeError('comparison between Message and another type')\n\n        def key(msg):\n            \"\"\"Return a key for comparison.\"\"\"\n            return [msg.type] + [\n                getattr(msg, arg) for arg in msg._spec.arguments]\n\n        return key(self) == key(other)\n\n    def __len__(self):\n        if self.type == 'sysex':\n            return len(self.data) + 2\n        else:\n            return self._spec.length\n\n\ndef build_message(spec, bytes):\n    \"\"\"Build message from bytes.\n\n    This is used by Parser and MidiFile. bytes is a full list\n    of bytes for the message including the status byte. For sysex\n    messages, the end byte is not included. Examples:\n\n        build_message(spec, [0x80, 20, 100])\n        build_message(spec, [0xf0, 1, 2, 3])\n\n    No type or value checking is done, so you need to do that before you\n    call this function. 
0xf7 is not allowed as status byte.\n    \"\"\"\n    message = Message.__new__(Message)\n    attrs = message.__dict__\n    message.__dict__.update({\n        'type': spec.type,\n        '_spec': spec,\n        'time': 0,\n    })\n\n    # This could be written in a more general way, but most messages\n    # are note_on or note_off so doing it this way is faster.\n    if spec.type in ['note_on', 'note_off']:\n        message.__dict__.update({\n            'channel': bytes[0] & 0x0f,\n            'note': bytes[1],\n            'velocity': bytes[2],\n        })\n        return message\n\n    elif spec.type == 'control_change':\n        message.__dict__.update({\n            'channel': bytes[0] & 0x0f,\n            'control': bytes[1],\n            'value': bytes[2],\n        })\n        return message\n\n    elif spec.status_byte < 0xf0:\n        # Channel message. The most common type.\n        if spec.type == 'pitchwheel':\n            pitch = (bytes[1] | (bytes[2] << 7)) + MIN_PITCHWHEEL\n            arguments = {'pitch': pitch}\n        else:\n            arguments = dict(zip(spec.arguments, bytes))\n        # Replace status_bytes sneakily with channel.\n        arguments['channel'] = bytes[0] & 0x0f\n\n    elif spec.type == 'sysex':\n        arguments = {'data': tuple(bytes[1:])}\n\n    elif spec.type == 'songpos':\n        pos = bytes[1] | (bytes[2] << 7)\n        arguments = {'pos': pos}\n\n    elif spec.type == 'quarter_frame':\n        arguments = {'frame_type': bytes[1] >> 4,\n                     'frame_value' : bytes[1] & 15}\n\n    else:\n        arguments = dict(zip(spec.arguments, bytes[1:]))\n\n    message.__dict__.update(arguments)\n    return message\n\n\ndef parse_time(text):\n    if text.endswith('L'):\n        raise ValueError('L is not allowed in time')\n\n    if PY2:\n        converters = [int, long, float]\n    else:\n        converters = [int, float]\n\n    for convert in converters:\n        try:\n            return convert(text)\n        except ValueError:\n            pass\n\n    raise ValueError('invalid format for time')\n\n\ndef parse_string(text):\n    \"\"\"Parse a string of text and return a message.\n\n    The string can span multiple lines, but must contain\n    one full message.\n\n    Raises ValueError if the string could not be parsed.\n    \"\"\"\n    words = text.split()\n\n    message = Message(words[0])\n    arguments = words[1:]\n\n    names_seen = set()\n\n    for argument in arguments:\n        try:\n            name, value = argument.split('=')\n        except ValueError:\n            raise ValueError('missing or extraneous equals sign')\n\n        if name in names_seen:\n            raise ValueError('argument passed more than once')\n        names_seen.add(name)\n\n        if name == 'data':\n            if not (value.startswith('(') and value.endswith(')')):\n                raise ValueError('missing parentheses in data message')\n\n            try:\n                data_bytes = [int(byte) for byte in value[1:-1].split(',')]\n            except ValueError:\n                raise ValueError('unable to parse data bytes')\n            setattr(message, 'data', data_bytes)\n        elif name == 'time':\n            try:\n                time = parse_time(value)\n            except ValueError:\n                raise ValueError('invalid value for time')\n            try:\n                setattr(message, 'time', time)\n            except AttributeError as err:\n                raise ValueError(str(err))\n            except TypeError as err:\n                raise ValueError(str(err))\n        else:\n            try:\n                setattr(message, name, int(value))\n            except AttributeError as exception:\n                raise ValueError(*exception.args)\n            except ValueError:\n                raise ValueError('{!r} is not an integer'.format(value))\n\n    return message\n\n\ndef parse_string_stream(stream):\n    \"\"\"Parse a stream of messages and yield (message, error_message)\n\n    stream can be any iterable that generates text strings. If\n    a line can be parsed, (message, None) is returned. If it can't\n    be parsed (None, error_message) is returned. 
The error message\n    contains the line number where the error occurred.\n    \"\"\"\n    line_number = 1\n    for line in stream:\n        try:\n            line = line.split('#')[0].strip()\n            if line:\n                yield parse_string(line), None\n        except ValueError as exception:\n            error_message = 'line {line_number}: {message}'.format(\n                line_number=line_number,\n                message=exception.args[0])\n            yield None, error_message\n        line_number += 1\n\n\ndef _format_as_string(message):\n    \"\"\"Format a message and return as a string.\n\n    There is no reason to call this function directly.\n    Use str(message) instead.\n    \"\"\"\n    if not isinstance(message, Message):\n        raise ValueError('message must be a mido.Message object')\n\n    words = []\n    words.append(message.type)\n\n    for name in message._spec.arguments + ('time',):\n        value = getattr(message, name)\n        if name == 'data':\n            value = '({})'.format(','.join([str(byte) for byte in value]))\n        elif name == 'time':\n            # Python 2 formats longs as '983989385L'. This is not allowed.\n            value = str(value)\n            value = value.replace('L', '')\n        words.append('{}={}'.format(name, value))\n    \n    return ' '.join(words)\n","sub_path":"mido/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":19063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"270288754","text":"#Receiver\n\nimport socket\nimport sys\n\ndef getLocalIP():\n    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    sock.connect((\"10.0.0.1\", 80))\n\n    localIP = sock.getsockname()[0]\n    print(sock.getsockname()[0])\n\n    sock.close()\n\n    return localIP \n\nif __name__ == \"__main__\":\n\n    if (len(sys.argv) < 2): #if the file arg is not passed, exit\n        print(\"!Invalid Input Parameters!\\nproper input:\\tpython3 server.py <filename>\")\n        exit(1)\n\n    IP = getLocalIP()\n    PORT = 5005\n    outfile = open(sys.argv[1], \"a+\")\n\n    sock = socket.socket(socket.AF_INET, # Internet\n                    socket.SOCK_DGRAM) # UDP\n    sock.bind((IP, PORT))\n    sock.settimeout(5)\n\n    nextSeqNumber = 0\n    dataReceived = set()\n    print(\"Awaiting Data Transfer\")\n    while True:\n        try:\n            data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes\n            data = data.decode(\"UTF-8\") #decode the byte sequence\n            message = data[1:] #slice off the seq number to get message\n\n            #pull the sequence number from the packet and compare\n            #to the next expected packet\n            seqNum = int(data[0])\n            #\n            #if a packet is received containing \"ffff\" and it is in sequence, close the socket and exit\n            if (nextSeqNumber == seqNum) and data[1:] == \"ffff\":\n                print(\"Transmission Complete\")\n                sock.sendto(bytes(\"close\", encoding='UTF-8'), (addr[0], PORT))\n                sock.close()\n                exit(0)\n            #if the packet is in sequence, and hasn't already been received, write to file\n            elif (nextSeqNumber == seqNum) and (data not in dataReceived): #if it is equal to the next expected packet, write\n                outfile.write(message)\n                dataReceived.add(data)\n                nextSeqNumber +=1\n            else: #otherwise send a NACK, for a dropped packet\n                requestedSeq = nextSeqNumber\n                nack = \"N:\"+str(nextSeqNumber)\n                sock.sendto(bytes(nack, encoding='UTF-8'), (addr[0], PORT))\n\n            if nextSeqNumber == 10: #if the nextSeqNumber is 10, reset, a full window has been received\n                print(\"ACK Sent\")\n                nextSeqNumber = 0\n                sock.sendto(bytes(\"ACK\", encoding='UTF-8'), (addr[0], PORT))\n        except(socket.timeout):\n            print(\"Socket Timed Out, Exiting Program\")\n            sock.close()\n            
exit(1)\n\n","sub_path":"project4/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"103990500","text":"from __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Dict, List, Union\n\nfrom workspace.build_systems.cmake_recipe_mixin import CMakeRecipeMixin\nfrom workspace.util import newer_than, run_with_prefix\n\nfrom .all_recipes import register_recipe\nfrom .klee_libcxxabi import KLEE_LIBCXXABI\nfrom .llvm import LLVM\nfrom .recipe import Recipe\n\nif TYPE_CHECKING:\n import hashlib\n from workspace import Workspace\n\n\nclass KLEE_LIBCXX(Recipe, CMakeRecipeMixin): # pylint: disable=invalid-name\n \"\"\"LLVM's libcxx built for KLEE\"\"\"\n\n profiles = {\n \"release\": {\n \"cmake_args\": {\n 'CMAKE_BUILD_TYPE': 'Release',\n 'LLVM_ENABLE_ASSERTIONS': True,\n },\n \"is_performance_build\": True,\n \"has_debug_info\": False,\n },\n \"rel+debinfo\": {\n \"cmake_args\": {\n 'CMAKE_BUILD_TYPE': 'RelWithDebInfo',\n 'LLVM_ENABLE_ASSERTIONS': True,\n },\n \"c_flags\": [\"-fno-omit-frame-pointer\", \"-g3\", \"-fdebug-types-section\"],\n \"cxx_flags\": [\"-fno-omit-frame-pointer\", \"-g3\", \"-fdebug-types-section\"],\n \"is_performance_build\": True,\n \"has_debug_info\": True,\n },\n \"debug\": {\n \"cmake_args\": {\n 'CMAKE_BUILD_TYPE': 'Debug',\n 'LLVM_ENABLE_ASSERTIONS': True,\n },\n \"c_flags\": [\"-fno-omit-frame-pointer\", \"-g3\", \"-fdebug-types-section\"],\n \"cxx_flags\": [\"-fno-omit-frame-pointer\", \"-g3\", \"-fdebug-types-section\"],\n \"is_performance_build\": False,\n \"has_debug_info\": True,\n },\n }\n\n default_arguments: Dict[str, Any] = {\n \"llvm\": LLVM().default_name,\n \"klee-libcxxabi\": KLEE_LIBCXXABI().default_name,\n }\n\n argument_schema: Dict[str, Any] = {\n \"llvm\": str,\n \"klee-libcxxabi\": str,\n }\n\n def find_llvm(self, workspace: Workspace) -> LLVM:\n return self._find_previous_build(workspace, \"llvm\", LLVM)\n\n def find_klee_libcxxabi(self, workspace: Workspace) -> KLEE_LIBCXXABI:\n return self._find_previous_build(workspace, \"klee-libcxxabi\", KLEE_LIBCXXABI)\n\n def _get_wllvm_env(self, workspace: Workspace) -> Dict[str, str]:\n env = workspace.get_env()\n\n llvm = self.find_llvm(workspace)\n\n env['LLVM_COMPILER'] = \"clang\"\n env['LLVM_COMPILER_PATH'] = str(llvm.paths[\"bin_dir\"])\n\n return env\n\n def __init__(self, **kwargs):\n CMakeRecipeMixin.__init__(self)\n Recipe.__init__(self, **kwargs)\n\n def setup(self, workspace: Workspace):\n klee_libcxxabi = self.find_klee_libcxxabi(workspace)\n assert klee_libcxxabi.paths[\"libcxx_src_dir\"].exists(), \"Could not find 'libcxx' sources\"\n\n def initialize(self, workspace: Workspace):\n Recipe.initialize(self, workspace)\n CMakeRecipeMixin.initialize(self, workspace)\n\n klee_libcxxabi = self.find_klee_libcxxabi(workspace)\n\n self.paths[\"src_dir\"] = klee_libcxxabi.paths[\"libcxx_src_dir\"]\n self.paths[\"include_dir\"] = self.paths[\"src_dir\"] / \"include\"\n self.paths[\"lib_dir\"] = self.paths[\"build_dir\"] / \"lib\"\n self.paths[\"libcxx.so\"] = self.paths[\"lib_dir\"] / \"libc++.so.1.0\"\n self.paths[\"libcxx.bc\"] = self.paths[\"lib_dir\"] / \"libc++.so.1.0.bc\"\n self.paths[\"klee_libcxx.bc\"] = self.paths[\"lib_dir\"] / \"libc++.so.bc\"\n\n CMakeRecipeMixin.set_build_env(self, self._get_wllvm_env(workspace))\n CMakeRecipeMixin.set_use_ccache(self, False)\n CMakeRecipeMixin.set_build_targets(self, [\"cxx\"])\n\n def 
_compute_digest(self, workspace: Workspace, digest: \"hashlib._Hash\") -> None:\n Recipe.compute_digest(self, workspace, digest)\n CMakeRecipeMixin.compute_digest(self, workspace, digest)\n\n digest.update(self.find_llvm(workspace).digest)\n digest.update(self.find_klee_libcxxabi(workspace).digest)\n\n def configure(self, workspace: Workspace) -> None:\n llvm = self.find_llvm(workspace)\n klee_libcxxabi = self.find_klee_libcxxabi(workspace)\n\n self.cmake.set_flag('CMAKE_C_COMPILER', 'wllvm')\n self.cmake.set_flag('CMAKE_CXX_COMPILER', 'wllvm++')\n self.cmake.set_flag('LLVM_CONFIG_PATH', llvm.paths[\"llvm-config\"])\n\n self.cmake.set_flag('LIBCXX_CXX_ABI', 'libcxxabi')\n self.cmake.set_flag('LIBCXX_CXX_ABI_INCLUDE_PATHS', klee_libcxxabi.paths[\"include_dir\"])\n self.cmake.set_flag('LIBCXX_CXX_ABI_LIBRARY_PATH', klee_libcxxabi.paths[\"lib_dir\"])\n self.cmake.set_flag('LIBCXX_ENABLE_ABI_LINKER_SCRIPT', False)\n self.cmake.set_flag('LIBCXX_ENABLE_SHARED', True)\n self.cmake.set_flag('LIBCXX_INCLUDE_BENCHMARKS', False)\n self.cmake.set_flag('LIBCXX_ENABLE_THREADS', False)\n\n config_env: Dict[str, str] = dict(CMakeRecipeMixin.get_build_env(self))\n config_env[\"WLLVM_CONFIGURE_ONLY\"] = \"ON\"\n CMakeRecipeMixin.set_configure_env(self, config_env)\n\n def build(self, workspace: Workspace):\n klee_libcxxabi = self.find_klee_libcxxabi(workspace)\n llvm = self.find_llvm(workspace)\n\n CMakeRecipeMixin.build(self, workspace)\n\n if not newer_than(target=self.paths[\"libcxx.bc\"], others=[self.paths[\"libcxx.so\"]]):\n extract_bc_cmd: List[Union[str, Path]] = [\n \"extract-bc\", \"--linker\", llvm.paths[\"llvm-link\"], \"--archiver\", llvm.paths[\"llvm-ar\"], \"-o\",\n self.paths[\"libcxx.bc\"], self.paths[\"libcxx.so\"]\n ]\n run_with_prefix(extract_bc_cmd, self.output_prefix, check=True, cwd=self.paths[\"build_dir\"])\n\n if not newer_than(target=self.paths[\"klee_libcxx.bc\"],\n others=[self.paths[\"libcxx.bc\"], klee_libcxxabi.paths[\"libcxxabi.bc\"]]):\n link_cmd: List[Union[str, Path]] = [\n llvm.paths[\"llvm-link\"], \"-o\", self.paths[\"klee_libcxx.bc\"], self.paths[\"libcxx.bc\"],\n klee_libcxxabi.paths[\"libcxxabi.bc\"]\n ]\n run_with_prefix(link_cmd, self.output_prefix, check=True, cwd=self.paths[\"build_dir\"])\n\n def add_to_env(self, env, workspace: Workspace):\n pass\n\n\nregister_recipe(KLEE_LIBCXX)\n","sub_path":"ws-src/workspace/recipes/klee_libcxx.py","file_name":"klee_libcxx.py","file_ext":"py","file_size_in_byte":6226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"477210479","text":"#------------------------------------\n# Name: Zachary Chin\n# For: Lab03 for CS-300-A\n# Date: February 26, 2020\n#------------------------------------\n\n# import modules\nimport time\nimport RPi.GPIO as GPIO\n\n# set GPIO pin labelling to BCM\nGPIO.setmode(GPIO.BCM)\n\n# Set up GPIO pins to input/output\nGPIO.setup(12, GPIO.IN, pull_up_down = GPIO.PUD_UP) # set up switch as input (initial state is UP/HIGH/TRUE)\nGPIO.setup(16, GPIO.OUT) # set up LED as output\n\nstate = 1 # Keeps track of the last state of the input \n\n#------------------------------------------------------------\n# Callback function\n# Input: BCM pin number\n# Output: The channel that the edge was detected on && time\n#------------------------------------------------------------\ndef my_callback(channel):\n print('Edge detected on BCM %s'%channel)\n print(time.time())\n\n# Calls the callback function whenever the switch is released\n# Releasing the switch causes a 
rising edge (LOW -> HIGH)\nGPIO.add_event_detect(12, GPIO.RISING, callback = my_callback, bouncetime = 300)\n\n# Set startTime equal to the current time\nstartTime = time.time()\n\n\n# Runs the loop while the time elapsed (time.time() - startTime) without\n# any action (e.g. pressing the switch) is less than 5 seconds\n# Once 5 seconds has passed without any action, the while loop ends\nwhile (time.time() - startTime) < 5:\n    \n    # If the switch is pressed down and state == 1\n    if GPIO.input(12) == False and state == 1:\n        startTime = time.time() # Update startTime to the current time\n        GPIO.output(16, True) # Turn on the LED\n        time.sleep(0.5)\n        state = 0\n\n    # Otherwise, if the switch is pressed down and state == 0\n    # (elif, so a single press cannot toggle the LED twice in one pass)\n    elif GPIO.input(12) == False and state == 0:\n        startTime = time.time() # Update startTime to the current time\n        GPIO.output(16, False) # Turn off the LED\n        time.sleep(0.5)\n        state = 1\n\nprint(\"User inactive for too long, quitting program. Goodbye!\")\n\n# Clean up GPIO\nGPIO.cleanup()","sub_path":"Labs/Lab03/switchled.py","file_name":"switchled.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"11477927","text":"from scrapy import Spider, Selector, Request\nfrom urllib.parse import urljoin\nfrom crawl.items.weiwangitem import WeiwangItem\n\nclass weiwangspider(Spider):\n    name = \"weiwang\"\n    start_urls = [\n        'https://price.pcauto.com.cn/price/nb643/'\n    ]\n\n    def parse(self, response):\n        sel = Selector(response)\n        bigdiv = sel.css('div.thBig')\n        for div in bigdiv:\n            carname = div.css('p.tit a::text').extract_first()\n            url = div.css('p.tit a::attr(href)').extract_first()\n            url = urljoin(response.url, url)\n            canurl = response.xpath('//p[@class=\"lin linA\"]/a[contains(text(), \"参配\")]/@href').extract_first()\n            canurl = urljoin(response.url, canurl)\n            yield Request(url=url, callback=self.car_content, 
meta={'carname': carname})\n            yield Request(url=canurl, callback=self.can_content)\n        # carname = response.css('p.tit a::text').extract()\n        # urls = response.css('p.tit a::attr(href)').extract()\n\n\n    def car_content(self, response):\n        item = WeiwangItem()\n        sel = Selector(response)\n        item['name'] = response.meta.get('carname')\n        item['price'] = sel.css('em.gf ::text').extract_first()\n        item['displacement'] = sel.css('ul.des p em a::text').re(r'\\d+.\\d+L')\n        # item['level'] = sel.css('p a::text').extract()[2]\n        # item['transmission'] = sel.css('p a::text').extract[3]\n        # item['type'] = sel.css('p a::text').extract[4]\n        # print(item['name'], item['price'], item['displacement'])\n        yield item\n\n    def can_content(self, response):\n        sel = Selector(response)\n\n        pass","sub_path":"crawl/spiders/wangshilong/weiwang.py","file_name":"weiwang.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"108452652","text":"#!/usr/bin/python3\n\nimport json\nimport sys\nfrom messageapi.broker import BrokerSubscriber\nfrom messageapi.flood import FloodSubscriber\n\n\nsystem = {\"FLOOD\": FloodSubscriber,\n          \"BROKER\": BrokerSubscriber}\n\nclass WeatherSubscriber:\n\n    def __init__(self, api, topic=\"00000\", history=\"0\"):\n        self.sub = system[api](topic=topic, history=history)\n        self.topic = topic\n        self.sub.register_sub()\n\n    def run(self):\n        print(\"Running subscriber application...\")\n        total_temp = []\n        while len(total_temp) < self.sub.history:\n            message = self.sub.notify()\n            if message is not None:\n                # print(\"Subscriber Application got message.\")\n                # temperature, relhumidity = string.split(\" \")\n                try:\n                    data = json.loads(message)\n                    temperature = data['temperature']\n                    total_temp.append(int(temperature))\n                except:\n                    pass\n        print(\"Average temperature for zipcode '%s' was %dF\" % (self.topic, sum(total_temp) / max(len(total_temp), 1)))\n\n\ndef main():\n    topic_filter = sys.argv[1] if len(sys.argv) > 1 else \"90210\"\n    api = sys.argv[2] if len(sys.argv) > 2 else \"BROKER\"\n    history = sys.argv[3] if len(sys.argv) > 3 else \"5\"\n    \n    if api not in system:\n        print(\"Usage error -- message API can either be FLOOD or BROKER\")\n        sys.exit(-1)\n    if not topic_filter.isdigit() or len(topic_filter) != 5:\n        print(\"Usage error -- topic must be a zipcode (all numbers, 5 total).\")\n        sys.exit(-1)\n\n    ws = WeatherSubscriber(api, topic=topic_filter, history=history)\n    while True:\n        ws.run()\n\n\nif __name__ == \"__main__\":\n    main()\n\n\n\n","sub_path":"subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"354439581","text":"#!/usr/bin/env python3\n\nimport sys, os, argparse\nimport numpy as np\n\n############\nparser = argparse.ArgumentParser(description=__doc__,\n  formatter_class=argparse.RawDescriptionHelpFormatter)\nparser.add_argument('chains', type=str)\nparser.add_argument('nfrag', type=int)\nparser.add_argument('lrmsd', nargs='+', help=\"list of frag lrmsd\")\nparser.add_argument('--average', action=\"store_true\")\nargs = parser.parse_args()\n############\n\nnfrag = args.nfrag\ncc = [ l for l in open(args.chains).readlines()]\nprint(cc[0],end=\"\")\ncc = cc[1:]\ncc = [l.split() for l in cc]\nchains = [ [int(i)-1 for i in l[-nfrag:]] for l in cc]\nprint(args.lrmsd, file=sys.stderr)\nlrmsds = [ [float(l.split()[-1]) for l in open(f).readlines()] for f in 
args.lrmsd]\n\nif args.average:\n    for cnr, c in enumerate(chains):\n        for p in cc[cnr][:-nfrag]:\n            print(p, end=\" \")\n        for p in c:\n            print(p, end=\" \")\n        rms = []\n        for fr, p in enumerate(c):\n            rms.append(lrmsds[fr][p])\n        a = (sum(r**2 for r in rms)/nfrag)**0.5\n        print(a)\n\nelse:\n    for cnr, c in enumerate(chains):\n        for p in cc[cnr][:-nfrag]:\n            print(p, end=\" \")\n        for p in c:\n            print(p, end=\" \")\n        for fr, p in enumerate(c):\n            print(lrmsds[fr][p], end=\" \")\n        print(\"\")\n","sub_path":"rmsd_chains.py","file_name":"rmsd_chains.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"322578093","text":"# -*- coding: utf-8 -*-\n#\n# Moulynex : Static Analysis for Python\n# Started on 04-22-2014\n# By Claire Vacherot\n\n\"\"\"Class for results storage and output.\"\"\"\n\nclass MyxResults(object):\n    \"\"\"This class stores results from tests stored in MyxResult objects\n    and outputs them in the format defined by the user.\n    \"\"\"\n    __results = None # List of results\n\n    def __init__(self):\n        self.__results = []\n\n    def append(self, result):\n        \"\"\"Add a MyxResult object to the list of results to print.\"\"\"\n        if isinstance(result, MyxResult):\n            self.__results.append(result)\n\n    def is_duplicate(self, typ, msg, loc, val):\n        \"\"\"Check if the list contains a MyxResult object with the \n        exact same attributes to avoid duplicate entries.\n        \"\"\"\n        for result in self.__results:\n            if result.get('type') == typ and \\\n               result.get('message') == msg and \\\n               result.get('location') == loc and \\\n               result.get('value') == val:\n                return True\n        return False\n\n    def get_results(self):\n        return self.__results\n\n    def output(self, display=None):\n        \"\"\"Display results in the defined format.\"\"\"\n        for result in self.__results:\n            result.output(display)\n\n\nclass MyxResult(object):\n    \"\"\"Class to store the result of one single test.\"\"\"\n    __type = ''\n    __message = ''\n    __location = None\n    __value = ''\n\n    def __init__(self, typ, msg, loc, val):\n        self.__type = typ\n        self.__message = msg\n        self.__location = loc\n        self.__value = val\n\n    def get(self, keyword):\n        \"\"\"Gets the value associated to keyword.\"\"\"\n        if keyword == 'type':\n            return self.__type\n        if keyword == 'message':\n            return self.__message\n        if keyword == 'location':\n            return self.__location\n        if keyword == 'value':\n            return self.__value\n        return None\n\n    def set(self, keyword, value):\n        \"\"\"Sets a value to keyword.\"\"\"\n        if keyword == 'type':\n            self.__type = value\n        elif keyword == 'message':\n            self.__message = value\n        elif keyword == 'location':\n            self.__location = value\n        elif keyword == 'value':\n            self.__value = value\n\n    def output(self, display=None):\n        \"\"\"Prints the result of the test.\"\"\"\n        res = '[{0}] {1} {2} ({3})'.format(self.__type, self.__message,\n                                           self.__location, self.__value)\n        if display is None:\n            print(res)\n        else:\n            raise moulynex.ResultError(display)\n","sub_path":"myx_results.py","file_name":"myx_results.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"580988518","text":"# encoding: utf-8\n# @author: yinqianjun\n# @file: Market_Action.py\n# @time: 2020/5/17 20:55\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom WoniuBoss_GUI_Test.tools.service import Service\nimport time\nclass Market:\n    def __init__(self,driver):\n        self.driver = driver\n        self.driver.implicitly_wait(10)\n    # Enter the Resource Management - Resume Resources module\n    def 
into_resource(self):\n        Service.open_page(self.driver)\n        Service.only_login(self.driver)\n        time.sleep(2)\n        Service.into_module(self.driver,'市场营销')\n        Service.into_module(self.driver, '简历资源')\n\n    # Select the region\n    def input_area(self,area):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[1]/select')\n        Service.droplist(ele,area)\n\n    # Enter the phone number\n    def input_phone(self,phone):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[3]/input')\n        ele.click()\n        ele.send_keys(phone)\n\n    # Select the department\n    def input_partment(self,partment):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[2]/select')\n        Service.droplist(ele, partment)\n\n    # Enter the name\n    def input_name(self,name):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[4]/input')\n        ele.send_keys(name)\n\n    # Select the gender\n    def input_sex(self,sex):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[5]/select')\n        Service.droplist(ele, sex)\n\n    # Select the status\n    def input_status(self,status):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[6]/select')\n        Service.droplist(ele, status)\n\n    # Enter the WeChat ID\n    def input_wx(self,wx):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[7]/input')\n        ele.click()\n        ele.send_keys(wx)\n\n    # Enter the QQ number\n    def input_qq(self,qq):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[8]/input')\n        ele.click()\n        ele.send_keys(qq)\n\n    # Enter the school\n    def input_school(self,school):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[9]/input')\n        ele.send_keys(school)\n\n    # Select the education level\n    def input_education(self,education):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[10]/select')\n        Service.droplist(ele, education)\n\n    # Enter the major\n    def input_major(self,major):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[11]/input')\n        ele.send_keys(major)\n\n    # Select the years of work experience\n    def input_workage(self,workage):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[12]/select')\n        Service.droplist(ele, workage)\n\n    # Enter the age\n    def input_age(self,age):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[13]/input')\n        ele.send_keys(age)\n\n    # Select the channel source\n    def input_source(self,source):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[14]/select')\n        Service.droplist(ele, source)\n\n    # Enter the education experience\n    def input_eduexp(self,eduexp):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[15]/textarea')\n        ele.send_keys(eduexp)\n\n    # Enter the work experience\n    def input_experience(self,experience):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[16]/textarea')\n        ele.send_keys(experience)\n\n    # Enter the latest follow-up notes\n    def input_tracking(self,tracking):\n        ele = self.driver.find_element_by_xpath('/html/body/div[11]/div/div/form/div/div/div[17]/textarea')\n        ele.send_keys(tracking)\n\n    # Add a resume resource\n    def add_resource(self,resource_info):\n        self.into_resource()\n        self.driver.find_element_by_xpath('/html/body/div[8]/div[2]/div[2]/button[4]').click()\n        self.input_phone(resource_info['phone'])\n        self.input_name(resource_info['name'])\n        self.input_age(resource_info['age'])\n        self.input_area(resource_info['area'])\n        self.input_partment(resource_info['partment'])\n        self.input_education(resource_info['education'])\n        
self.input_eduexp(resource_info['eduexp'])\n        self.input_experience(resource_info['experience'])\n        self.input_sex(resource_info['sex'])\n        self.input_school(resource_info['school'])\n        self.input_source(resource_info['source'])\n        self.input_status(resource_info['status'])\n        self.input_workage(resource_info['workage'])\n        self.input_wx(resource_info['wx'])\n        self.input_qq(resource_info['qq'])\n        self.input_major(resource_info['major'])\n        self.driver.find_element_by_id('addCusBtn').click()\n        if Service.is_element_present(self.driver,'xpath','/html/body/div[12]/div/div/div[3]/button'):\n            return True\n        else:\n            return False\n\n    # Enter the search keyword\n    def input_value(self,content):\n        self.driver.find_element_by_xpath('/html/body/div[8]/div[2]/div[2]/input[3]').send_keys(content)\n\n    # Exact search by phone, QQ, WeChat or name\n    def search_resource(self,info):\n        self.into_resource()\n        self.input_value(info['value'])\n        self.driver.find_element_by_xpath('/html/body/div[8]/div[2]/div[2]/button[1]').click()\n        if Service.is_element_present(self.driver,'xpath',\"//td[12]/button[@class='btn btn-info']\")==True:\n            return True\n        else:\n            return False\n\n    # Upload a resume\n    def upload_resource(self,info):\n        self.into_resource()\n        self.driver.find_element_by_xpath(\"/html/body/div[8]/div[2]/div[2]/button[3]\").click()# click the upload button\n        area=self.driver.find_element_by_xpath(\"//select[@id='regionSelect']\")# region drop-down\n        Service.droplist(area,info['area'])\n        partment=self.driver.find_element_by_xpath(\"//select[@id='dpetSelect']\")# department drop-down\n        Service.droplist(partment, info['partment'])\n        self.driver.find_element_by_xpath('//*[@id=\"files\"]').send_keys(info['path'])# pass in the file path\n        self.driver.find_element_by_xpath(\"//div[@class='modal-footer']/button[@class='btn btn-primary btn-padding']\").click()\n        msg = self.driver.find_element_by_xpath(\"//div[@class='bootbox-body']\").get_attribute(\"innerHTML\")\n        return msg\nif __name__ == '__main__':\n    driver = Service.get_driver()\n    mk = Market(driver)\n    info = {'path': 'D:\\\\woniuboss接口信息.xls','area':'成都','partment':'咨询部'}\n    mk.upload_resource(info)\n\n","sub_path":"4.School _GUI_Test/lib/Market/Market_Action.py","file_name":"Market_Action.py","file_ext":"py","file_size_in_byte":6802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"653400889","text":"\"\"\"\nSome say that every one year of a human’s life is equivalent to seven years of a dog’s life.\nHence, I decided to write a function called dog_years that has two parameters named name and age.\nThis function computes the age in dog years and returns your dog's name and age in dog years.\nConditions:\n1. The dog name must consist of letters only.\n2. The dog age must be a whole number, 0 or greater.\n\"\"\"\n\n\ndef dog_years(name, age):\n    return \"{}, you are {} years old in dog years\".format(name, age*7)\n\n\nprint(\"This is a dog year program which calculates a dog’s age in dog years.\\n\")\nwhile True:\n    name = input(\"Please enter your dog name: \")\n    if name.isalpha():\n        break\n    print(\"invalid name. please re-type your dog name.\\n\")\n\nwhile True:\n    age = input(\"Please enter your dog age: \")\n    if age.isnumeric() and int(age) >= 0:\n        break\n    print(\"invalid age. 
please re-type your dog age.\\n\")\nprint(dog_years(name, int(age)))\n","sub_path":"Dog Years.py","file_name":"Dog Years.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"441474441","text":"import unittest\nimport pandas as pd\nimport dash_html_components as html\nfrom pytz import timezone\nfrom utils import *\n\ntz = timezone('US/Pacific')\nnow = datetime.datetime.now(tz)\ndate = now.strftime(\"%Y%m%d\")\n\nclass TestUtils(unittest.TestCase):\n \n def test_get_games(self):\n self.assertEqual(len(get_games(date)[0]), 8)\n self.assertEqual(len(get_games(date)[1]), 3)\n\n def test_fetch_box(self):\n self.assertEqual(fetch_box('0041700155')[1]['ta'], 'SAS')\n self.assertEqual(fetch_box('0041700155')[2]['ta'], 'GSW')\n \n def test_advStats_table(self):\n self.assertIsInstance(advStats_table(pd.DataFrame()), html.Table)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"248041491","text":"from aws_infrastructure.tasks import compose_collection\nimport aws_infrastructure.tasks.library.codebuild_instance\nimport aws_infrastructure.tasks.library.terraform\nfrom collections import namedtuple\nfrom invoke import Collection\nfrom invoke import task\nimport os\nfrom pathlib import Path\nimport ruamel.yaml\nimport shutil\nfrom typing import List\nfrom typing import Union\n\n\ndef _apply_pre_exec(\n *,\n terraform_dir: Path,\n instances: List[str],\n codebuild_environment_variables_factory, # Dictionary from string to function that returns dictionary\n):\n def apply_pre_exec(\n *,\n context,\n params,\n ):\n \"\"\"\n Prepare CodeBuild archives that Terraform can upload.\n \"\"\"\n\n # Create an archive of each source that Terraform can upload to S3\n for instance_current in instances:\n path_source = Path(terraform_dir, instance_current)\n path_staging = Path(terraform_dir, 'staging', instance_current)\n\n # Copy source into a staging directory\n shutil.rmtree(path=path_staging, ignore_errors=True)\n shutil.copytree(src=path_source, dst=path_staging)\n\n # Determine whether we need to update the buildspec.yml with environment variables\n if codebuild_environment_variables_factory != None and instance_current in codebuild_environment_variables_factory:\n # Obtain the variables we need to update in the buildspec.yml\n codebuild_environment_variables_current = codebuild_environment_variables_factory[instance_current](context=context)\n\n # Use a parsing object for roundtrip\n yaml_parser = ruamel.yaml.YAML()\n path_buildspec = Path(path_staging, 'buildspec.yml')\n\n # Update the buildspec to add provided environment variables\n with open(path_buildspec) as file_buildspec:\n yaml_buildspec = yaml_parser.load(file_buildspec)\n\n # Ensure the buildspec provides for environment variables\n if 'env' not in yaml_buildspec:\n yaml_buildspec['env'] = {}\n if 'variables' not in yaml_buildspec['env']:\n yaml_buildspec['env']['variables'] = {}\n\n # Add the variables\n for key_current, value_current in codebuild_environment_variables_current.items():\n yaml_buildspec['env']['variables'][key_current] = value_current\n\n # Replace the buildspec\n os.remove(path_buildspec)\n with open(path_buildspec, mode='w') as file_buildspec:\n yaml_parser.dump(yaml_buildspec, file_buildspec)\n\n # Make the archive\n shutil.make_archive(\n 
base_name=path_staging,\n                format='zip',\n                root_dir=path_staging\n            )\n\n            # Remove the staged source directory\n            shutil.rmtree(\n                path=path_staging,\n                ignore_errors=True\n            )\n\n    return apply_pre_exec\n\n\ndef _destroy_post_exec(\n    *,\n    terraform_dir: Path,\n    instances: List[str],\n):\n    def destroy_post_exec(\n        *,\n        context,\n        params,\n    ):\n        \"\"\"\n        Remove CodeBuild archives.\n        \"\"\"\n\n        # Clean up the archives\n        for instance_current in instances:\n            os.remove(Path(terraform_dir, 'staging', instance_current + '.zip'))\n\n    return destroy_post_exec\n\n\ndef create_tasks(\n    *,\n    config_key: str,\n    terraform_bin: Union[Path, str],\n    terraform_dir: Union[Path, str],\n    instances: List[str],\n    codebuild_environment_variables_factory,\n):\n    \"\"\"\n    Create all of the tasks, re-using and passing parameters appropriately.\n    \"\"\"\n\n    terraform_bin = Path(terraform_bin)\n    terraform_dir = Path(terraform_dir)\n\n    ns_codebuild = Collection('codebuild')\n\n    ns_terraform = aws_infrastructure.tasks.library.terraform.create_tasks(\n        config_key=config_key,\n        terraform_bin=terraform_bin,\n        terraform_dir=terraform_dir,\n        apply_pre_exec=_apply_pre_exec(\n            terraform_dir=terraform_dir,\n            instances=instances,\n            codebuild_environment_variables_factory=codebuild_environment_variables_factory,\n        ),\n        destroy_post_exec=_destroy_post_exec(\n            terraform_dir=terraform_dir,\n            instances=instances\n        ),\n    )\n\n    compose_collection(\n        ns_codebuild,\n        ns_terraform,\n        sub=False,\n    )\n\n    # Create tasks associated with our instances\n    for instance_current in instances:\n        ns_instance = aws_infrastructure.tasks.library.codebuild_instance.create_tasks(\n            config_key='{}.{}'.format(config_key, instance_current),\n            task_apply=ns_codebuild.tasks['apply'],\n            instance=instance_current,\n        )\n\n        compose_collection(ns_codebuild, ns_instance)\n\n    return ns_codebuild\n","sub_path":"aws_infrastructure/aws_infrastructure/tasks/library/codebuild.py","file_name":"codebuild.py","file_ext":"py","file_size_in_byte":4967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"561926616","text":"import json\nfrom datetime import timedelta\n\nfrom django.contrib import messages\nfrom django.contrib.auth.views import redirect_to_login\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Q\nfrom django.shortcuts import redirect\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import View\n\nfrom pretix.base.models import (\n    CartPosition, EventLock, Item, ItemVariation, Quota,\n)\nfrom pretix.base.services.cart import (\n    CartError, add_items_to_cart, remove_items_from_cart,\n)\nfrom pretix.presale.views import (\n    EventViewMixin, LoginOrGuestRequiredMixin, user_cart_q,\n)\n\n\nclass CartActionMixin:\n\n    def get_next_url(self):\n        if \"next\" in self.request.GET and '://' not in self.request.GET.get('next'):\n            return self.request.GET.get('next')\n        elif \"HTTP_REFERER\" in self.request.META:\n            return self.request.META.get('HTTP_REFERER')\n        else:\n            return reverse('presale:event.index', kwargs={\n                'event': self.request.event.slug,\n                'organizer': self.request.event.organizer.slug,\n            })\n\n    def get_success_url(self):\n        return self.get_next_url()\n\n    def get_failure_url(self):\n        return self.get_next_url()\n\n    def _items_from_post_data(self):\n        \"\"\"\n        Parses the POST data and returns a list of tuples in the\n        form (item id, variation id or None, number)\n        \"\"\"\n        items = []\n        for key, value in self.request.POST.items():\n            if 
value.strip() == '':\n continue\n if key.startswith('item_'):\n try:\n items.append((key.split(\"_\")[1], None, int(value)))\n except ValueError:\n messages.error(self.request, _('Please enter numbers only.'))\n return []\n elif key.startswith('variation_'):\n try:\n items.append((key.split(\"_\")[1], key.split(\"_\")[2], int(value)))\n except ValueError:\n messages.error(self.request, _('Please enter numbers only.'))\n return []\n if len(items) == 0:\n messages.warning(self.request, _('You did not select any products.'))\n return []\n return items\n\n\nclass CartRemove(EventViewMixin, CartActionMixin, LoginOrGuestRequiredMixin, View):\n\n def post(self, *args, **kwargs):\n items = self._items_from_post_data()\n if not items:\n return redirect(self.get_failure_url())\n\n remove_items_from_cart(self.request.event.identity, items, self.request.user.id,\n self.request.session.session_key)\n messages.success(self.request, _('Your cart has been updated.'))\n return redirect(self.get_success_url())\n\n\nclass CartAdd(EventViewMixin, CartActionMixin, View):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n items = self._items_from_post_data()\n\n # We do not use LoginRequiredMixin here, as we want to store stuff into the\n # session before redirecting to login\n if not request.user.is_authenticated() and 'guest_email' not in request.session:\n request.session['cart_tmp'] = json.dumps(items)\n return redirect_to_login(\n self.get_success_url(), reverse('presale:event.checkout.login', kwargs={\n 'organizer': request.event.organizer.slug,\n 'event': request.event.slug,\n }) + '?guest=1', 'next'\n )\n\n return self.process(items)\n\n def process(self, items):\n try:\n add_items_to_cart(self.request.event.identity, items, self.request.user.id,\n self.request.session.session_key)\n messages.success(self.request, _('The products have been successfully added to your cart.'))\n return redirect(self.get_success_url())\n except CartError as e:\n messages.error(self.request, str(e))\n return redirect(self.get_failure_url())\n","sub_path":"src/pretix/presale/views/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":4181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"228721152","text":"import logging\nimport sys\n\nformatter = logging.Formatter('[%(asctime)s -- %(levelname)s]\\t%(message)s')\n\nhandler = logging.StreamHandler(sys.stdout)\nhandler.setFormatter(formatter)\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(handler)\nlogger.setLevel(logging.DEBUG)\n\n__all__ = ['logger']\n","sub_path":"bot/src/utils/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"259809254","text":"\"\"\"\nTest regular expression for matching shard end points.\n\"\"\"\n\nfrom pyesgf.search.consts import SHARD_REXP\nimport re\n\ntests = [\n\"https://esgf-test.a.b.c/solr\",\n\"http://esgf.a.c/solr/data\",\n\"http://esgs.a.d:80/data/solr\",\n\"esgf.a.c:80/solr\",\n\"esgf.a.c/solr\"\n]\n\nexpected = [\n(\"https://\", \"esgf-test.a.b.c\", None, \"solr\"),\n(\"http://\", \"esgf.a.c\", None, \"solr/data\"),\n(\"http://\", \"esgs.a.d\", \"80\", \"data/solr\"),\n(None, \"esgf.a.c\", \"80\", \"solr\"),\n(None, \"esgf.a.c\", None, \"solr\")\n]\n\nkeys = (\"prefix\", \"host\", \"port\", \"suffix\") \n\nR = 
re.compile(\"^(?Phttps?://)?(?P.+?):?(?P\\d+)?/(?P.+)$\")\n\ndef test_regex():\n for i, test in enumerate(tests):\n \n match = R.match(test)\n d = match.groupdict()\n values = tuple([d[key] for key in keys])\n\n assert values == expected[i]\n\n\nif __name__ == \"__main__\":\n test_regex()\n\n","sub_path":"test/test_shard_regex.py","file_name":"test_shard_regex.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"396995381","text":"from django.conf.urls import include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\nimport groupmanager.views\n\n# Examples:\n# url(r'^$', 'groupmanager.views.home', name='home'),\n# url(r'^blog/', include('blog.urls')),\n\nurlpatterns = [\n url(r'^$', groupmanager.views.index, name='index'),\n url(r'^groupmanager/', include('groupmanager.urls')),\n url(r'^budget/', include('budget.urls')),\n url(r'^admin/', admin.site.urls),\n]\n","sub_path":"mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"498438002","text":"from __future__ import print_function\nfrom keras.callbacks import LambdaCallback\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.optimizers import RMSprop\nimport numpy as np\nimport sys\nimport io\nimport random\nimport argparse\n\n\n\n\"\"\"\n modes:\n 'init' :: train without loading in weights, and write to weights\n 'train' :: load weights from weights, train, and then wright back to weights\n 'sample' :: don't train and instead print a sample\n\"\"\"\n\nparser = argparse.ArgumentParser(description=\"An LSTM\")\nparser.add_argument('mode', choices=['init', 'train', 'sample'], default='sample',\n help= \"Indicate whether program should be in 'int', 'train', or 'sample' mode\")\n #dest is mode ??\nparser.add_argument('--sample', type=int, default=1000,\n help=\"The number of characters to be generated when sampling. Default = 1000\")\n #maybe also allow temperature of samples to be chosen\n #only if sample mode?\nparser.add_argument('--epoch', type=int, default=3,\n help='Indicate the number of epochs that the model should be trained on. Default = 3')\n #only if not sample mode ??\nparser.add_argument('--step', type=int, default=5,\n help='The step size for creating sentences from corpus. Default = 5')\nparser.add_argument('--sampleSize', type=int, default=(-1),\n help=\"The maximum number of characters from the corpus to be trained on. 
Default is one million\")\n#parser.add_argument('--corpus', type=argparse.FileType('r'), default='Lyrics.txt',\n# help=\"Indicate the corpus source file if not 'Lyrics.txt'\")\nparser.add_argument('--corpus', type=str, default='Lyrics.txt',\n help=\"Indicate the corpus source file if not 'Lyrics.txt'\")\nargs = parser.parse_args()\n\nmode = args.mode\nprintSize = args.sample\nnumEpoch = args.epoch\nstep = args.step\nsampleSize = args.sampleSize\ncorpus = args.corpus\nweights = 'weights.hdf5'\n\n#########################\n##### Prepare Input #####\n#########################\n\nwith io.open(corpus, encoding='utf-8') as f:\n text = f.read().lower()\nprint('corpus length:', len(text))\n\nchars = sorted(list(set(text)))\nprint('total chars:', len(chars))\nchar_indices = dict((c, i) for i, c in enumerate(chars))\nindices_char = dict((i, c) for i, c in enumerate(chars))\n\ndata_size, vocab_size = len(text), len(chars)\n\n# cut the text in semi-redundant sequences of maxlen characters\nmaxlen = 40\nsentences = []\nnext_chars = []\n\ntextStart = 0\ntextEnd = 0\n\n# if sampleSize is smaller than the size of the corpus, use a smaller section of the corpus randomly chosen.\nif len(text) > sampleSize and sampleSize > 0:\n textEnd = random.randint(sampleSize - 1, len(text) - 1)\n textStart = textEnd - sampleSize\n for i in range(textStart, textEnd - maxlen, step):\n sentences.append(text[i: i + maxlen])\n next_chars.append(text[i + maxlen])\n print('nb sequences:', len(sentences))\nelse:\n # Using the entire corpus\n for i in range(0, len(text) - maxlen, step):\n sentences.append(text[i: i + maxlen])\n next_chars.append(text[i + maxlen])\n print('nb sequences:', len(sentences))\n\nprint('Vectorization...')\nx = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)\ny = np.zeros((len(sentences), len(chars)), dtype=np.bool)\nfor i, sentence in enumerate(sentences):\n for t, char in enumerate(sentence):\n x[i, t, char_indices[char]] = 1\n y[i, char_indices[next_chars[i]]] = 1\n\n\n\n\n###########################\n##### Construct Model #####\n###########################\n\n# build the model: a single LSTM\nprint('Build model...')\nmodel = Sequential()\nmodel.add(LSTM(128, input_shape=(maxlen, len(chars))))\nmodel.add(Dense(len(chars), activation='softmax'))\n\n# load weights\nif mode != 'init':\n model.load_weights(weights)\n\noptimizer = RMSprop(lr=0.01)\nmodel.compile(loss='categorical_crossentropy', optimizer=optimizer)\n\n\n\n\n#################################\n##### Training and Sampling #####\n#################################\n\ndef sample(preds, temperature=1.0):\n # helper function to sample an index from a probability array\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\n\ndef writeSample():\n file = open('out.txt', 'a')\n file.write('\\n \\n \\n')\n\n start_index = random.randint(0, len(text) - maxlen - 1)\n for diversity in [0.5, 1.0]:\n print('----- diversity:', diversity)\n\n generated = ''\n sentence = text[start_index: start_index + maxlen]\n generated += sentence\n print('----- Generating with seed: \"' + sentence + '\"')\n file.write('----- Generating with seed: \"' + sentence + '\" \\n')\n sys.stdout.write(generated)\n\n for i in range(printSize):\n x_pred = np.zeros((1, maxlen, len(chars)))\n for t, char in enumerate(sentence):\n x_pred[0, t, char_indices[char]] = 1.\n\n preds = model.predict(x_pred, 
verbose=0)[0]\n next_index = sample(preds, diversity)\n next_char = indices_char[next_index]\n\n generated += next_char\n sentence = sentence[1:] + next_char\n\n sys.stdout.write(next_char)\n sys.stdout.flush()\n print()\n file.write(generated + '\\n')\n\n# checkpoint used for init and train to save weights after each epoch\ncheckpoint = [ModelCheckpoint(filepath=weights, save_best_only=True)]\n\nif mode != 'sample':\n #train\n model.fit(x, y,\n batch_size=128,\n epochs=numEpoch,\n callbacks=checkpoint\n )\nelse:\n #sample\n writeSample()\n","sub_path":"lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":5946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"232640828","text":"def upper_lower_case(word):\r\n upper, lower = 0, 0\r\n for i in word:\r\n if 'a' <= i and i <= 'z' :\r\n lower += 1\r\n if 'A' <= i and i <= 'Z':\r\n upper += 1\r\n print(\"UPPER CASE: {0}\\nLOWER CASE: {1}\".format(upper, lower))\r\n return word, \"Upper case --->\", upper, \"Lower case --->\", lower\r\n\r\ndef upperLower(s):\r\n upper, lower = 0, 0\r\n for i in s:\r\n lower += i.islower()\r\n upper += i.isupper()\r\n print(\"UPPER CASE: \", end = '')\r\n print(upper)\r\n print(\"LOWER CASE: \", end = '')\r\n print(lower)\r\n return s, \"Upper case ***\", upper, \"Lower case ***\", lower\r\n\r\ndef comp_Func_Upper_Lower(string):\r\n upper = sum(1 for i in string if i.isupper()) # sum function cumulatively sum up 1's \r\n lower = sum(1 for i in string if i.islower()) # if the condition is True\r\n print(\"UPPER CASE: %d\\nLOWER CASE: %d\" %(upper, lower))\r\n return string, \"Upper case +++\", upper, \"Lower case +++\", lower\r\n\r\ndef upper_lower_func(str_1):\r\n upper = 0\r\n lower = 0\r\n for x in str_1:\r\n if x.isupper() == True:\r\n upper += 1\r\n if x.islower() == True:\r\n lower += 1\r\n print(\"UPPER CASE:\", upper)\r\n print(\"LOWER CASE:\", lower)\r\n return str_1, \"Upper case ---\", upper, \"Lower case ---\", lower\r\n\r\ndef upper_lower_dict(str_2):\r\n dict_1 = {\"UPPER CASE\":0, \"LOWER CASE\":0}\r\n for char in str_2:\r\n if char.isupper():\r\n dict_1[\"UPPER CASE\"]+=1\r\n elif char.islower():\r\n dict_1[\"LOWER CASE\"]+=1\r\n else:\r\n pass\r\n return \"UPPER CASE: {0}\\nLOWER CASE: {1}\".format(dict_1[\"UPPER CASE\"], dict_1[\"LOWER CASE\"])\r\n\r\nword = input(\"Enter a word: \")\r\nprint(upper_lower_case(word))\r\nprint()\r\nprint(upperLower(word))\r\nprint()\r\nprint(comp_Func_Upper_Lower(word))\r\nprint()\r\nprint(upper_lower_func(word))\r\nprint()\r\nprint(upper_lower_dict(word))\r\n","sub_path":"Week13/Homework_2_with_solutions/ACM114_HW2_Question3_Solution.py","file_name":"ACM114_HW2_Question3_Solution.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"583699989","text":"import random\n\nfrom ..exceptions import DriverNotFound\nfrom .BaseConnection import BaseConnection\n\nCONNECTION_POOL = []\n\n\nclass PostgresConnection(BaseConnection):\n \"\"\"Postgres Connection class.\n \"\"\"\n\n name = \"postgres\"\n\n def make_connection(self):\n \"\"\"This sets the connection on the connection class\n \"\"\"\n try:\n import psycopg2\n except ModuleNotFoundError:\n raise DriverNotFound(\n \"You must have the 'psycopg2' package installed to make a connection to Postgres. 
Please install it using 'pip install psycopg2-binary'\"\n )\n\n self._connection = psycopg2.connect(**self.get_connection_details())\n\n return self\n\n def get_connection_details(self):\n \"\"\"This is responsible for standardizing the normal connection\n details and passing it into the connection.\n\n This will eventually be unpacked so make sure the keys are the same as the keywords\n that should pass to your connection method\n \"\"\"\n connection_details = {}\n connection_details.setdefault(\"host\", self.connection_details.get(\"host\"))\n connection_details.setdefault(\"user\", self.connection_details.get(\"user\"))\n connection_details.setdefault(\n \"password\", self.connection_details.get(\"password\")\n )\n connection_details.setdefault(\"port\", int(self.connection_details.get(\"port\")))\n connection_details.setdefault(\"dbname\", self.connection_details.get(\"database\"))\n connection_details.update(self.connection_details.get(\"options\", {}))\n return connection_details\n\n @classmethod\n def get_database_name(cls):\n # get_connection_details stores the database name under \"dbname\"\n return cls().get_connection_details().get(\"dbname\")\n\n def reconnect(self):\n pass\n\n def commit(self):\n \"\"\"Transaction\n \"\"\"\n return self._connection.commit()\n\n def begin(self):\n \"\"\"Transaction\n \"\"\"\n return self._connection.begin()\n\n def rollback(self):\n \"\"\"Transaction\n \"\"\"\n self._connection.rollback()\n\n def transaction_level(self):\n \"\"\"Transaction\n \"\"\"\n pass\n\n def query(self, query, bindings=(), results=\"*\"):\n \"\"\"Make the actual query that will reach the database and come back with a result.\n\n Arguments:\n query {string} -- A string query. This could be a qmarked string or a regular query.\n bindings {tuple} -- A tuple of bindings\n\n Keyword Arguments:\n results {str|1} -- If results equals an asterisk it will call 'fetchAll'\n else it will return 'fetchOne' and return a single record. 
(default: {\"*\"})\n\n Returns:\n dict|None -- Returns a dictionary of results or None\n \"\"\"\n import psycopg2\n\n query = query.replace(\"'?'\", \"%s\")\n print(\"running query:\", query, bindings)\n if self._dry:\n return {}\n try:\n if self._connection.closed:\n self.make_connection()\n with self._connection.cursor(\n cursor_factory=psycopg2.extras.RealDictCursor\n ) as cursor:\n cursor.execute(query, bindings)\n if results == 1:\n return dict(cursor.fetchone() or {})\n else:\n if \"SELECT\" in cursor.statusmessage:\n return cursor.fetchall()\n return {}\n except Exception as e:\n raise e\n finally:\n self._connection.close()\n","sub_path":"src/masonite/orm/connections/PostgresConnection.py","file_name":"PostgresConnection.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"88563100","text":"import os\nimport subprocess\nimport argparse\nimport datetime\n\n################################################################################\n# All problems represented in the paper's main table (problem, entry function) #\n################################################################################\nBENCHMARK = { \n (\"max\", \"max\"),\n (\"mem\", \"mem\"),\n (\"mirror\", \"mirror\"),\n (\"sigma\", \"sigma\"),\n (\"iter\", \"iter\"),\n (\"uniq\", \"uniq\"),\n (\"nat\", \"natmul\"),\n (\"formula\", \"eval\"),\n (\"lambda\", \"check\"), \n (\"diff\", \"grading\")\n}\n\n# Benchmarks' directiory\nBENCHMARK_DIR = \"benchmarks\"\nTA_DIR = os.path.join(BENCHMARK_DIR, \"ta_solutions\")\nTEST_DIR = os.path.join(BENCHMARK_DIR, \"testcases\")\nCORRECT_DIR = os.path.join(BENCHMARK_DIR, \"C\")\nINCORRECT_DIR = os.path.join(BENCHMARK_DIR, \"I\")\n\n############################################################\n# Options for running this script #\n############################################################\ndef parsing_arguments():\n parser = argparse.ArgumentParser(description = \"This script is written for reproducing tables in the paper\")\n\n parser.add_argument(\"-option\", type = str, default = \"all\",\n choices = [\"all\", \"cafe\", \"fixml\", \"func\", \"prog\", \"cafe2\"],\n help = \"Input an option of [all, cafe, fixml, func, prog]\\n the default option is \\'all\\'\"\n )\n\n parser.add_argument(\"-problem\", type = str, default = None,\n choices = [\"max\", \"mem\", \"mirror\", \"sigma\", \"iter\", \"uniq\", \"nat\", \"formula\", \"lambda\", \"diff\"],\n help = \"Input a name of problem you want to test (e.g., mirror)\"\n )\n\n args = parser.parse_args()\n return (args.option, args.problem)\n\n##################################################################\n# The subfunctions for utility. e.g., find directory lists etc. 
#\n##################################################################\n# find the all path of subdirectories\ndef find_dir(path):\n whole_list = os.listdir(path)\n result = []\n for name in whole_list :\n full_path = os.path.join(path,name)\n if(os.path.isdir(full_path)):\n result.append(full_path)\n else:\n continue\n return result\n\n# find the all path of subfiles\ndef find_files(path):\n whole_list = os.listdir(path)\n result = []\n for f in whole_list :\n result.append(os.path.join(path,f))\n return result\n\n# make directory for logging\ndef make_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n# Executing the given shell commands\ndef execute_command(command):\n p = subprocess.Popen(command,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)\n try: \n out, err = p.communicate (timeout=1800)\n out = out.decode()\n return (out, err)\n except subprocess.TimeoutExpired:\n p.terminate() \n p.wait()\n return (\"\", True)\n \n#############################################\n# Functions for stroing evaluation results #\n#############################################\n\n# Create a directory for storing result : result:hh::mm:ss/opt/problem/sub -> (original, patch, result) #\ndef make_result_dir(result_dir, problem, submission, opt):\n # result_time/opt\n result_dir = os.path.join(result_dir, opt)\n make_dir(result_dir)\n\n # result_time/opt/problem\n result_dir = os.path.join(result_dir, problem)\n make_dir(result_dir)\n\n # result_time/opt/problem/sub_n/\n result_dir = os.path.join(result_dir, submission)\n make_dir(result_dir)\n\n return result_dir\n\n# Parsing the original program, output, result (time, score, examples, etc...)\ndef parse_result(out):\n result_list = out.split(\"-----------------------------\")\n \n original = result_list[2]\n patch = result_list[4]\n result = result_list[6]\n\n return (original.strip(), patch.strip(), result.strip())\n\n###############################################\n# Functions for running script with an option #\n###############################################\ndef run_opt_with_submission (result_dir, problem, entry, submission_path, opt):\n submission_name = submission_path.split('/')[-1].replace(\".ml\", \"\")\n \n # Generate command\n opt_submission = \" -submission \" + submission_path\n opt_entry = \" -entry \" + entry\n opt_testcases = \" -testcases \" + os.path.join(TEST_DIR, problem + \"_testcases\")\n\n if opt == \"fixml\":\n opt_solution = \" -solution \" + os.path.join(TA_DIR, problem + \"_solution.ml\")\n elif opt == \"cafe2\":\n opt_solution = \" -solution \" + os.path.join(TA_DIR, problem + \"_solution.ml\") + \" -solutions \" + os.path.join(CORRECT_DIR, problem)\n else:\n opt_solution = \" -solutions \" + os.path.join(CORRECT_DIR, problem)\n\n if opt == \"fixml\" or opt == \"cafe\" or opt == \"cafe2\":\n opt_run = \" -fix\"\n elif opt == \"prog\":\n opt_run = \" -prog\"\n elif opt == \"func\":\n opt_run = \" -func\"\n\n # If a test driver is required, provide driver \n test_driver = os.path.join(TEST_DIR, problem + \"_grading.ml\")\n if os.path.exists(test_driver):\n opt_grading = \" -grading \" + test_driver\n else:\n opt_grading = \"\"\n\n command = \"engine/main.native\" + opt_run + opt_submission + opt_solution + opt_testcases + opt_grading + opt_entry\n print(command)\n (out, err) = execute_command(command)\n result_dir = make_result_dir(result_dir, problem, submission_name, opt)\n print(out)\n\n if not err:\n (original, patch, result) = parse_result(out)\n\n # Save original submission \n 
temp_original = os.path.join(result_dir, \"temp_original\")\n with open (temp_original, \"w\") as temp_file:\n temp_file.write(original)\n \n with open (temp_original, \"r\") as temp_file:\n original_path = os.path.join(result_dir, \"original.ml\")\n (out, err) = execute_command(\"ocamlformat \" + temp_original + \" -o \" + original_path)\n if err:\n execute_command(\"cp \" + temp_original + \" \" + original_path)\n execute_command (\"rm \" + temp_original)\n \n # Save patch result\n temp_patch = os.path.join(result_dir, \"temp_patch\")\n with open (temp_patch, \"w\") as temp_file:\n temp_file.write(patch)\n\n with open (temp_patch, \"r\") as temp_file:\n patch_path = os.path.join(result_dir, \"patch.ml\")\n (out, err) = execute_command(\"ocamlformat \" + temp_patch + \" -o \" + patch_path)\n if err:\n execute_command(\"echo \\\"\" + patch + \"\\\" > \" + patch_path)\n execute_command (\"rm \" + temp_patch)\n\n with open (os.path.join(result_dir, \"result.txt\"), \"w\") as result_file:\n result_file.write(result)\n else:\n execute_command (\"ocamlformat \" + submission_path + \" -o \" + os.path.join(result_dir, \"original.ml\"))\n execute_command (\"echo \\\";;\\nNone\\n\\\" > \" + os.path.join(result_dir, \"patch.ml\"))\n execute_command (\"echo \\\"[Timeout]: fails to run the submission \" + submission_path + \"\\\" > \" + os.path.join(result_dir, \"result.txt\"))\n\n\ndef run_opt_with_problem (result_dir, problem, opt):\n print (\"[INFO]: run problem \\'\" + problem + \"\\' by \\'\" + opt + \"\\' option\")\n for (problem_name, entry) in BENCHMARK:\n if problem == problem_name:\n submissions_path = os.path.join(INCORRECT_DIR, problem)\n submissions = find_files(submissions_path)\n submissions.sort(key = lambda x : int(x.split(\"/\")[-1].split(\"sub\")[1].split(\".\")[0]))\n for submission in submissions:\n run_opt_with_submission(result_dir, problem, entry, submission, opt)\n else:\n pass\n\ndef run_opt_all (result_dir, opt):\n for (problem, entry) in BENCHMARK:\n run_opt_with_problem (result_dir, problem, opt)\n\ndef main():\n (opt, problem) = parsing_arguments()\n\n # Create a directory for storing evaluation result\n curr_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\n result_dir= \"result\" + curr_time\n make_dir(result_dir)\n\n # Default : run all programs with FixML, CAFE, and SARFGEN\n if opt == \"all\" and problem == None:\n run_opt_all (result_dir, \"cafe\")\n run_opt_all (result_dir, \"fixml\")\n run_opt_all (result_dir, \"prog\")\n run_opt_all (result_dir, \"func\")\n run_opt_all (result_dir, \"cafe2\")\n\n # Run all submissions of one problem by all options\n if opt == \"all\" and not problem == None:\n run_opt_with_problem (result_dir, problem, \"cafe\")\n run_opt_with_problem (result_dir, problem, \"fixml\")\n run_opt_with_problem (result_dir, problem, \"prog\")\n run_opt_with_problem (result_dir, problem, \"func\")\n run_opt_with_problem (result_dir, problem, \"cafe2\")\n\n # Run all submissions by specific option \n if not opt == \"all\" and problem == None:\n run_opt_all (result_dir, opt)\n \n # Run all submissions of one problem by specific option \n if not opt == \"all\" and not problem == None:\n run_opt_with_problem (result_dir, problem, opt)\n\nif __name__ == '__main__':\n main()","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":8558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"231670850","text":"import numpy as np\r\nfrom bs4 import BeautifulSoup\r\nimport 
requests\r\nimport os\r\nimport time\r\nfrom selenium import webdriver\r\nfrom utils import *\r\n\r\n#URL='https://sumx.ir'\r\n#headers={'User-Agent': 'Chrome/39.0.2171.95'}\r\n\r\n\r\ndef dfs(piles):\r\n stack = [[i,0,-1] for i in p.available_actions()]\r\n \r\n p.set_state()\r\n \r\n while(len(stack)):\r\n action, level, order= stack[0]\r\n if order>=0:\r\n stack.pop(0)\r\n p.undo_action(action)\r\n continue\r\n stack[0][-1] += 1\r\n p.perform_action(action)\r\n #time.sleep(2)\r\n ret = p.set_state()\r\n if ret == -1:\r\n stack.pop(0)\r\n p.undo_action(action)\r\n continue\r\n state = p.get_state()\r\n state.visited+=1\r\n if p.is_win_state():\r\n break\r\n if p.get_state().visited > 1:\r\n stack.pop(0)\r\n p.undo_action(action)\r\n continue\r\n available_actions = p.available_actions()\r\n if len(available_actions)==0:\r\n stack.pop(0)\r\n p.undo_action(action)\r\n continue\r\n #stack = [[action, level+1, -1] for action in available_actions] + stack\r\n for actions in available_actions:\r\n stack.append([actions, level+1, -1])\r\n \r\n return [i[0] for i in stack if i[-1]>=0]\r\n \r\ndef parse_html(html):\r\n piles=np.full((4,14), [\" \"], dtype='object')\r\n soup = BeautifulSoup(open(html), \"html.parser\")\r\n balls=soup.find_all('div',{'class': \"ball\"})\r\n for b in range(len(balls)):\r\n i=3-b%4\r\n j=b//4\r\n try:\r\n piles[i][j]=balls[b]['class'][3]+balls[b]['class'][4].split('-')[1]\r\n except:\r\n piles[i][j]=balls[b]['class'][3]\r\n return piles\r\n\r\nif __name__ == \"__main__\":\r\n \r\n piles = parse_html(\"sort-ball.html\")\r\n \r\n p = Piles()\r\n for i in range(14):\r\n p.add_pile(Pile(piles[:,i]))\r\n p.set_original_piles()\r\n \r\n ans = dfs(p)\r\n print(\"Total Steps: {}\".format(len(ans)))\r\n for step in ans:\r\n print(step)","sub_path":"puzzle.py","file_name":"puzzle.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"568550932","text":"from calendar import monthrange\nfrom datetime import date\n\n\ndef meetup_day(yy, mm, weekday, abr):\n first_day, total_days = monthrange(yy, mm)\n days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday']\n\n # difference between first day and the dd i'm looking for\n start = (1 + (7 + (days.index(weekday) - first_day)) % 7)\n month_dates = [a for a in range(start, total_days+1, 7)]\n\n if abr == 'last':\n dex = -1\n elif abr == 'teenth': # find index for 'teenth'\n for index, day in enumerate(month_dates):\n if 13 <= day <= 19:\n dex = index\n break\n else:\n convert = ['1st', '2nd', '3rd', '4th', '5th']\n dex = convert.index(abr)\n\n try:\n return date(yy, mm, month_dates[dex])\n except IndexError:\n raise MeetupDayException('Error. 
Date doesn\\'t exist!')\n\nclass MeetupDayException(Exception):\n pass\n","sub_path":"python/meetup/meetup.py","file_name":"meetup.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"509689133","text":"import functools\nimport tensorflow as tf\nfrom tensorflow.contrib.layers.python.layers import encoders\ndef lazy_property(function):\n attribute = '_cache_' + function.__name__\n\n @property\n @functools.wraps(function)\n def decorator(self):\n if not hasattr(self, attribute):\n setattr(self, attribute, function(self))\n return getattr(self, attribute)\n\n return decorator\n\nfrom tensorflow.contrib import rnn\nclass biGRU_Model:\n def __init__(self, config, is_training=True):\n self.embed_size = config.embed_size\n self.hidden_size = config.hidden_size\n self.label_size = config.label_size\n self.batch_size = config.batch_size\n self.max_doc_len = config.max_doc_len\n self.vocab_size = config.vocab_size\n self.is_training = is_training\n self.x = tf.placeholder(tf.int32, \n [self.batch_size, None])\n self.y = tf.placeholder(tf.int32, [self.batch_size])\n self.lengths = tf.placeholder(tf.int32, [self.batch_size])\n self.predict\n if is_training:\n self.optimize\n print('Model Initialized!')\n \n @lazy_property\n def cost(self):\n logits = self.inference\n targets = tf.one_hot(self.y, self.label_size, 1, 0)\n targets = tf.cast(targets, tf.float32)\n #Note tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=activation)\n loss = tf.losses.softmax_cross_entropy(targets, logits)\n return loss\n \n @lazy_property\n def predict(self):\n logits = self.inference\n #probs = tf.nn.softmax(logits)\n predictions = tf.argmax(logits, 1)\n return predictions\n \n @lazy_property\n def correct_num(self):\n prediction = self.predict\n targets = tf.reshape(self.y, [-1])\n targets = tf.cast(targets, tf.int64)\n correct_prediction = tf.equal(prediction, targets)\n correct_num = tf.reduce_sum(tf.cast(correct_prediction, \"float\"))\n return correct_num\n \n @lazy_property\n def optimize(self):\n with tf.variable_scope('optimizer'):\n cost = self.cost\n #with tf.name_scope('Optimizer'):\n #self._learning_rate = tf.Variable(0.0, trainable=False)\n train_op = tf.train.AdamOptimizer(0.0001).minimize(cost)\n #train_op = tf.train.AdamOptimizer(self._learning_rate).minimize(cost)\n #tvars = tf.trainable_variables()\n #grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), 6)\n #optimizer = tf.train.AdamOptimizer(self._learning_rate)\n #train_op = optimizer.apply_gradients(zip(grads, tvars))\n return train_op\n \n @lazy_property\n def inference(self):\n #Create embedding matrix\n with tf.device(\"/cpu:0\"):\n embeddings = tf.get_variable('embedding', [self.vocab_size, self.embed_size])\n inputs = tf.nn.embedding_lookup(embeddings, self.x)\n if self.is_training:\n inputs = tf.nn.dropout(inputs, 0.5)\n\n def lstm():\n return rnn.BasicLSTMCell(self.hidden_size, forget_bias=0.0, \n state_is_tuple=True) \n \n def GRU():\n return rnn.GRUCell(self.hidden_size)\n #lstm_cell = lstm\n #cell = rnn.MultiRNNCell([lstm_cell() for _ in range(2)], \n #state_is_tuple=True)\n fw_cell = GRU()\n bw_cell = GRU()\n initial_fw_state = fw_cell.zero_state(self.batch_size, tf.float32)\n initial_bw_state = bw_cell.zero_state(self.batch_size, tf.float32)\n #Bidirectional dynamic RNN with given lengths for each text\n outputs, status = tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, inputs,\n initial_state_fw=initial_fw_state,\n 
initial_state_bw=initial_bw_state,\n sequence_length=self.lengths, \n dtype=tf.float32)\n #In a dynamic RNN, if the length is N, the outputs for the words after N are 0\n #And the status are copycats of the last status of the Nth word\n #Use bidirectional rnn output as hidden states for words\n #size=batch_size, word_length, word_embedding_size*2\n #print(status)\n V = tf.concat([status[0], status[1]], axis=1)\n \n if self.is_training:\n output = tf.nn.dropout(V, 0.5)\n else:\n output = V\n\n with tf.variable_scope('output_layer'):\n logits = tf.layers.dense(output, self.label_size, activation=None)\n # predicted values\n return logits\n \n @property\n def learningRate(self):\n return self._learning_rate","sub_path":"models/.ipynb_checkpoints/Bidirectional_RNN-checkpoint.py","file_name":"Bidirectional_RNN-checkpoint.py","file_ext":"py","file_size_in_byte":4808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"526903456","text":"def RadixSort(A):\n m=max(A)\n d=0\n while m>0:\n m=m//10\n d=d+1\n x=d-1\n n=0\n B=[None]*len(A)\n while n<=x:\n CountSort(A,B,n)\n print(B)\n n=n+1\ndef CountSort(A,B,x):\n C=[0]*(10)\n for i in range(len(A)):\n C[(A[i]//pow(10,x))%10]+=1\n for i in range(1,10):\n C[i]=C[i-1]+C[i]\n j=len(A)-1\n while j>=0:\n B[C[(A[j]//pow(10,x))%10]-1]=A[j]\n C[(A[j]//pow(10,x))%10]-=1\n j=j-1\n for i in range(len(A)):\n A[i]=B[i]\n\nif __name__==\"__main__\":\n a=[]\n b=map(int,input().rstrip().split())\n for i in b:\n a.append(i)\n RadixSort(a)\n\n","sub_path":"radixsort.py","file_name":"radixsort.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"262970652","text":"# -*- coding: utf-8 -*-\n\"\"\"\n proxy.py\n ~~~~~~~~\n ⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on\n Network monitoring, controls & Application development, testing, debugging.\n\n :copyright: (c) 2013-present by Abhinav Singh and contributors.\n :license: BSD, see LICENSE for more details.\n\"\"\"\nimport unittest\nimport selectors\nimport json\n\nfrom urllib import parse as urlparse\nfrom unittest import mock\nfrom typing import cast\n\nfrom proxy.proxy import Proxy\nfrom proxy.core.connection import TcpClientConnection\nfrom proxy.http.handler import HttpProtocolHandler\nfrom proxy.http.proxy import HttpProxyPlugin\nfrom proxy.common.utils import build_http_request, bytes_, build_http_response\nfrom proxy.common.constants import PROXY_AGENT_HEADER_VALUE, DEFAULT_HTTP_PORT\nfrom proxy.http.codes import httpStatusCodes\n\nfrom proxy.plugin import ProposedRestApiPlugin, RedirectToCustomServerPlugin\n\nfrom .utils import get_plugin_by_test_name\n\n\nclass TestHttpProxyPluginExamples(unittest.TestCase):\n\n @mock.patch('selectors.DefaultSelector')\n @mock.patch('socket.fromfd')\n def setUp(self,\n mock_fromfd: mock.Mock,\n mock_selector: mock.Mock) -> None:\n self.fileno = 10\n self._addr = ('127.0.0.1', 54382)\n self.flags = Proxy.initialize()\n self.plugin = mock.MagicMock()\n\n self.mock_fromfd = mock_fromfd\n self.mock_selector = mock_selector\n\n plugin = get_plugin_by_test_name(self._testMethodName)\n\n self.flags.plugins = {\n b'HttpProtocolHandlerPlugin': [HttpProxyPlugin],\n b'HttpProxyBasePlugin': [plugin],\n }\n self._conn = mock_fromfd.return_value\n self.protocol_handler = HttpProtocolHandler(\n TcpClientConnection(self._conn, self._addr),\n flags=self.flags)\n self.protocol_handler.initialize()\n\n 
@mock.patch('proxy.http.proxy.server.TcpServerConnection')\n def test_modify_post_data_plugin(\n self, mock_server_conn: mock.Mock) -> None:\n original = b'{\"key\": \"value\"}'\n modified = b'{\"key\": \"modified\"}'\n\n self._conn.recv.return_value = build_http_request(\n b'POST', b'http://httpbin.org/post',\n headers={\n b'Host': b'httpbin.org',\n b'Content-Type': b'application/x-www-form-urlencoded',\n b'Content-Length': bytes_(len(original)),\n },\n body=original\n )\n self.mock_selector.return_value.select.side_effect = [\n [(selectors.SelectorKey(\n fileobj=self._conn,\n fd=self._conn.fileno,\n events=selectors.EVENT_READ,\n data=None), selectors.EVENT_READ)], ]\n\n self.protocol_handler.run_once()\n mock_server_conn.assert_called_with('httpbin.org', DEFAULT_HTTP_PORT)\n mock_server_conn.return_value.queue.assert_called_with(\n build_http_request(\n b'POST', b'/post',\n headers={\n b'Host': b'httpbin.org',\n b'Content-Length': bytes_(len(modified)),\n b'Content-Type': b'application/json',\n b'Via': b'1.1 %s' % PROXY_AGENT_HEADER_VALUE,\n },\n body=modified\n )\n )\n\n @mock.patch('proxy.http.proxy.server.TcpServerConnection')\n def test_proposed_rest_api_plugin(\n self, mock_server_conn: mock.Mock) -> None:\n path = b'/v1/users/'\n self._conn.recv.return_value = build_http_request(\n b'GET', b'http://%s%s' % (\n ProposedRestApiPlugin.API_SERVER, path),\n headers={\n b'Host': ProposedRestApiPlugin.API_SERVER,\n }\n )\n self.mock_selector.return_value.select.side_effect = [\n [(selectors.SelectorKey(\n fileobj=self._conn,\n fd=self._conn.fileno,\n events=selectors.EVENT_READ,\n data=None), selectors.EVENT_READ)], ]\n self.protocol_handler.run_once()\n\n mock_server_conn.assert_not_called()\n self.assertEqual(\n self.protocol_handler.client.buffer[0].tobytes(),\n build_http_response(\n httpStatusCodes.OK, reason=b'OK',\n headers={b'Content-Type': b'application/json'},\n body=bytes_(\n json.dumps(\n ProposedRestApiPlugin.REST_API_SPEC[path]))\n ))\n\n @mock.patch('proxy.http.proxy.server.TcpServerConnection')\n def test_redirect_to_custom_server_plugin(\n self, mock_server_conn: mock.Mock) -> None:\n request = build_http_request(\n b'GET', b'http://example.org/get',\n headers={\n b'Host': b'example.org',\n }\n )\n self._conn.recv.return_value = request\n self.mock_selector.return_value.select.side_effect = [\n [(selectors.SelectorKey(\n fileobj=self._conn,\n fd=self._conn.fileno,\n events=selectors.EVENT_READ,\n data=None), selectors.EVENT_READ)], ]\n self.protocol_handler.run_once()\n\n upstream = urlparse.urlsplit(\n RedirectToCustomServerPlugin.UPSTREAM_SERVER)\n mock_server_conn.assert_called_with('localhost', 8899)\n mock_server_conn.return_value.queue.assert_called_with(\n build_http_request(\n b'GET', upstream.path,\n headers={\n b'Host': upstream.netloc,\n b'Via': b'1.1 %s' % PROXY_AGENT_HEADER_VALUE,\n }\n )\n )\n\n @mock.patch('proxy.http.proxy.server.TcpServerConnection')\n def test_filter_by_upstream_host_plugin(\n self, mock_server_conn: mock.Mock) -> None:\n request = build_http_request(\n b'GET', b'http://google.com/',\n headers={\n b'Host': b'google.com',\n }\n )\n self._conn.recv.return_value = request\n self.mock_selector.return_value.select.side_effect = [\n [(selectors.SelectorKey(\n fileobj=self._conn,\n fd=self._conn.fileno,\n events=selectors.EVENT_READ,\n data=None), selectors.EVENT_READ)], ]\n self.protocol_handler.run_once()\n\n mock_server_conn.assert_not_called()\n self.assertEqual(\n self.protocol_handler.client.buffer[0].tobytes(),\n build_http_response(\n 
status_code=httpStatusCodes.I_AM_A_TEAPOT,\n reason=b'I\\'m a tea pot',\n headers={\n b'Connection': b'close'\n },\n )\n )\n\n @mock.patch('proxy.http.proxy.server.TcpServerConnection')\n def test_man_in_the_middle_plugin(\n self, mock_server_conn: mock.Mock) -> None:\n request = build_http_request(\n b'GET', b'http://super.secure/',\n headers={\n b'Host': b'super.secure',\n }\n )\n self._conn.recv.return_value = request\n\n server = mock_server_conn.return_value\n server.connect.return_value = True\n\n def has_buffer() -> bool:\n return cast(bool, server.queue.called)\n\n def closed() -> bool:\n return not server.connect.called\n\n server.has_buffer.side_effect = has_buffer\n type(server).closed = mock.PropertyMock(side_effect=closed)\n\n self.mock_selector.return_value.select.side_effect = [\n [(selectors.SelectorKey(\n fileobj=self._conn,\n fd=self._conn.fileno,\n events=selectors.EVENT_READ,\n data=None), selectors.EVENT_READ)],\n [(selectors.SelectorKey(\n fileobj=server.connection,\n fd=server.connection.fileno,\n events=selectors.EVENT_WRITE,\n data=None), selectors.EVENT_WRITE)],\n [(selectors.SelectorKey(\n fileobj=server.connection,\n fd=server.connection.fileno,\n events=selectors.EVENT_READ,\n data=None), selectors.EVENT_READ)], ]\n\n # Client read\n self.protocol_handler.run_once()\n mock_server_conn.assert_called_with('super.secure', DEFAULT_HTTP_PORT)\n server.connect.assert_called_once()\n queued_request = \\\n build_http_request(\n b'GET', b'/',\n headers={\n b'Host': b'super.secure',\n b'Via': b'1.1 %s' % PROXY_AGENT_HEADER_VALUE\n }\n )\n server.queue.assert_called_once_with(queued_request)\n\n # Server write\n self.protocol_handler.run_once()\n server.flush.assert_called_once()\n\n # Server read\n server.recv.return_value = \\\n build_http_response(\n httpStatusCodes.OK,\n reason=b'OK', body=b'Original Response From Upstream')\n self.protocol_handler.run_once()\n self.assertEqual(\n self.protocol_handler.client.buffer[0].tobytes(),\n build_http_response(\n httpStatusCodes.OK,\n reason=b'OK', body=b'Hello from man in the middle')\n )\n\n @mock.patch('proxy.http.proxy.server.TcpServerConnection')\n def test_filter_by_url_regex_plugin(\n self, mock_server_conn: mock.Mock) -> None:\n request = build_http_request(\n b'GET', b'http://www.facebook.com/tr/',\n headers={\n b'Host': b'www.facebook.com',\n }\n )\n self._conn.recv.return_value = request\n self.mock_selector.return_value.select.side_effect = [\n [(selectors.SelectorKey(\n fileobj=self._conn,\n fd=self._conn.fileno,\n events=selectors.EVENT_READ,\n data=None), selectors.EVENT_READ)], ]\n self.protocol_handler.run_once()\n\n self.assertEqual(\n self.protocol_handler.client.buffer[0].tobytes(),\n build_http_response(\n status_code=httpStatusCodes.NOT_FOUND,\n reason=b'Blocked',\n headers={b'Connection': b'close'},\n )\n )\n","sub_path":"tests/plugin/test_http_proxy_plugins.py","file_name":"test_http_proxy_plugins.py","file_ext":"py","file_size_in_byte":10332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"331827424","text":"#!/usr/bin/env python\nimport numpy as np\nimport pandas as pd\nimport os\nimport glob\nimport datetime\nfrom astropy.time import Time\n\nimport abc\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Ellipse\n\n\nclass IntermediateDataParser(object):\n \"\"\"\n Base class for parsing Hip1 and Hip2 data. self.epoch, self.covariance_matrix and self.scan_angle are saved\n as panda dataframes. use .values (e.g. 
self.epoch.values) to call the ndarray version.\n \"\"\"\n def __init__(self, scan_angle=None, epoch=None, residuals=None, inverse_covariance_matrix=None):\n self.scan_angle = scan_angle\n self._epoch = epoch\n self.residuals = residuals\n self.inverse_covariance_matrix = inverse_covariance_matrix\n\n @staticmethod\n def read_intermediate_data_file(star_hip_id, intermediate_data_directory, skiprows, header, sep):\n filepath = os.path.join(os.path.join(intermediate_data_directory, '**/'), '*' + star_hip_id + '*')\n filepath_list = glob.glob(filepath, recursive=True)\n if len(filepath_list) > 1:\n raise ValueError('More than one input file with hip id {0} found'.format(star_hip_id))\n data = pd.read_csv(filepath_list[0], sep=sep, skiprows=skiprows, header=header, engine='python')\n return data\n\n @abc.abstractmethod\n def parse(self, star_id, intermediate_data_parent_directory, **kwargs):\n pass\n\n def julian_day_epoch(self):\n return self.convert_hip_style_epochs_to_julian_day(self._epoch)\n\n @staticmethod\n def convert_hip_style_epochs_to_julian_day(epochs, half_day_correction=True):\n jd_epochs = []\n for epoch in epochs.values:\n epoch_year = int(epoch)\n fraction = epoch - int(epoch)\n utc_time = datetime.datetime(year=epoch_year, month=1, day=1) + datetime.timedelta(days=365.25) * fraction\n if half_day_correction:\n utc_time += datetime.timedelta(days=0.5)\n jd_epochs.append(Time(utc_time).jd)\n return np.array(jd_epochs)\n\n def calculate_inverse_covariance_matrices(self, cross_scan_along_scan_var_ratio=1E5):\n cov_matrices = calculate_covariance_matrices(self.scan_angle,\n cross_scan_along_scan_var_ratio=cross_scan_along_scan_var_ratio)\n icov_matrices = np.zeros_like(cov_matrices)\n for i in range(len(cov_matrices)):\n icov_matrices[i] = np.linalg.pinv(cov_matrices[i])\n self.inverse_covariance_matrix = icov_matrices\n\n\ndef calculate_covariance_matrices(scan_angles, cross_scan_along_scan_var_ratio=1E5):\n \"\"\"\n :param scan_angles: pandas DataFrame with scan angles, e.g. as-is from the data parsers. scan_angles.values is a\n numpy array with the scan angles\n :param cross_scan_along_scan_var_ratio: var_cross_scan / var_along_scan\n :return An ndarray with shape (len(scan_angles), 2, 2), e.g. 
an array of covariance matrices in the same order\n as the scan angles\n \"\"\"\n covariance_matrices = []\n cov_matrix_in_scan_basis = np.array([[cross_scan_along_scan_var_ratio, 0],\n [0, 1]])\n # we define the along scan to be 'y' in the scan basis.\n for theta in scan_angles.values.flatten():\n # for Hipparcos, theta is measured against north, specifically east of the north equatorial pole\n c, s = np.cos(theta), np.sin(theta)\n Rccw = np.array([[c, -s], [s, c]])\n cov_matrix_in_ra_dec_basis = np.matmul(np.matmul(Rccw, cov_matrix_in_scan_basis), Rccw.T)\n covariance_matrices.append(cov_matrix_in_ra_dec_basis)\n return np.array(covariance_matrices)\n\n\nclass HipparcosOriginalData(IntermediateDataParser):\n def __init__(self, scan_angle=None, epoch=None, residuals=None, inverse_covariance_matrix=None):\n super(HipparcosOriginalData, self).__init__(scan_angle=scan_angle,\n epoch=epoch, residuals=residuals,\n inverse_covariance_matrix=inverse_covariance_matrix)\n\n def parse(self, star_hip_id, intermediate_data_directory, data_choice='NDAC'):\n \"\"\"\n :param star_hip_id: a string which is just the number for the HIP ID.\n :param intermediate_data_directory: the path (string) to the place where the intermediate data is stored, e.g.\n Hip2/IntermediateData/resrec\n note you have to specify the file resrec or absrec. We use the residual records, so specify resrec.\n :param data_choice: 'FAST' or 'NDAC'. This slightly affects the scan angles. This mostly affects\n the residuals which are not used.\n \"\"\"\n # compare by equality, not identity ('is not' against a string literal is unreliable)\n if data_choice not in ('NDAC', 'FAST'):\n raise ValueError('data choice has to be either NDAC or FAST')\n data = self.read_intermediate_data_file(star_hip_id, intermediate_data_directory,\n skiprows=0, header='infer', sep='\\\\s*\\\\|\\\\s*')\n # select either the data from the NDAC or the FAST consortium.\n data = data[data['IA2'] == data_choice[0]]\n # compute scan angles and observations epochs according to van Leeuwen & Evans 1997, eq. 
11 & 12.\n self.scan_angle = np.arctan2(data['IA3'], data['IA4']) # unit radians\n self._epoch = data['IA6'] / data['IA3'] + 1991.25\n self.residuals = data['IA8'] # unit milli-arcseconds (mas)\n\n\nclass HipparcosRereductionData(IntermediateDataParser):\n def __init__(self, scan_angle=None, epoch=None, residuals=None, inverse_covariance_matrix=None):\n super(HipparcosRereductionData, self).__init__(scan_angle=scan_angle,\n epoch=epoch, residuals=residuals,\n inverse_covariance_matrix=inverse_covariance_matrix)\n\n def parse(self, star_hip_id, intermediate_data_directory, **kwargs):\n data = self.read_intermediate_data_file(star_hip_id, intermediate_data_directory,\n skiprows=1, header=None, sep='\\s+')\n # compute scan angles and observations epochs from van Leeuwen 2007, table G.8\n # see also Figure 2.1, section 2.5.1, and section 4.1.2\n self.scan_angle = np.arctan2(data[3], data[4]) # data[3] = cos(psi), data[4] = sin(psi)\n self._epoch = data[1] + 1991.25\n self.residuals = data[5] # unit milli-arcseconds (mas)\n\n\nclass GaiaData(IntermediateDataParser):\n def __init__(self, scan_angle=None, epoch=None, residuals=None, inverse_covariance_matrix=None):\n super(GaiaData, self).__init__(scan_angle=scan_angle,\n epoch=epoch, residuals=residuals,\n inverse_covariance_matrix=inverse_covariance_matrix)\n\n def parse(self, star_hip_id, intermediate_data_directory, **kwargs):\n data = self.read_intermediate_data_file(star_hip_id, intermediate_data_directory,\n skiprows=0, header='infer', sep='\\s*,\\s*')\n self._epoch = data['ObservationTimeAtBarycentre[BarycentricJulianDateInTCB]']\n self.scan_angle = data['scanAngle[rad]']\n\n def julian_day_epoch(self):\n return self._epoch.values.flatten()\n\n\nclass AstrometricFitter(object):\n \"\"\"\n :param inverse_covariance_matrices: ndarray of length epoch times with the 2x2 inverse covariance matrices\n for each epoch\n :param epoch_times: 1D ndarray with the times for each epoch.\n \"\"\"\n def __init__(self, inverse_covariance_matrices=None, epoch_times=None,\n astrometric_chi_squared_matrices=None, astrometric_solution_vector_components=None):\n self.inverse_covariance_matrices = inverse_covariance_matrices\n self.epoch_times = epoch_times\n if astrometric_solution_vector_components is None:\n self.astrometric_solution_vector_components = self._init_astrometric_solution_vectors()\n if astrometric_chi_squared_matrices is None:\n self.astrometric_chi_squared_matrices = self._init_astrometric_chi_squared_matrices()\n\n def fit_line(self, ra_vs_epoch, dec_vs_epoch):\n \"\"\"\n :param ra_vs_epoch: 1d array of right ascension, ordered the same as the covariance matrices and epochs.\n :param dec_vs_epoch: 1d array of declination, ordered the same as the covariance matrices and epochs.\n :return:\n \"\"\"\n return np.linalg.solve(self._chi2_matrix(), self._chi2_vector(ra_vs_epoch=ra_vs_epoch,\n dec_vs_epoch=dec_vs_epoch))\n\n def _chi2_matrix(self):\n return np.sum(self.astrometric_chi_squared_matrices, axis=0)\n\n def _chi2_vector(self, ra_vs_epoch, dec_vs_epoch):\n ra_solution_vecs = self.astrometric_solution_vector_components['ra']\n dec_solution_vecs = self.astrometric_solution_vector_components['dec']\n # sum together the individual solution vectors for each epoch\n return np.dot(ra_vs_epoch, ra_solution_vecs) + np.dot(dec_vs_epoch, dec_solution_vecs)\n\n def _init_astrometric_solution_vectors(self):\n num_epochs = len(self.epoch_times)\n astrometric_solution_vector_components = {'ra': np.zeros((num_epochs, 4)),\n 'dec': np.zeros((num_epochs, 
4))}\n for epoch in range(num_epochs):\n d, b, c, a = unpack_elements_of_matrix(self.inverse_covariance_matrices[epoch])\n b, c = -b, -c\n epoch_time = self.epoch_times[epoch]\n ra_vec, dec_vec = np.zeros(4).astype(np.float64), np.zeros(4).astype(np.float64)\n ra_vec[0] = -(-2 * d * epoch_time)\n ra_vec[1] = -((b + c) * epoch_time)\n ra_vec[2] = -(-2 * d)\n ra_vec[3] = -(b + c)\n\n dec_vec[0] = -((b + c) * epoch_time)\n dec_vec[1] = -(- 2 * a * epoch_time)\n dec_vec[2] = -(b + c)\n dec_vec[3] = -(- 2 * a)\n\n astrometric_solution_vector_components['ra'][epoch] = ra_vec\n astrometric_solution_vector_components['dec'][epoch] = dec_vec\n return astrometric_solution_vector_components\n\n def _init_astrometric_chi_squared_matrices(self):\n num_epochs = len(self.epoch_times)\n astrometric_chi_squared_matrices = np.zeros((num_epochs, 4, 4))\n for epoch in range(num_epochs):\n d, b, c, a = unpack_elements_of_matrix(self.inverse_covariance_matrices[epoch])\n b, c = -b, -c\n epoch_time = self.epoch_times[epoch]\n\n A = np.zeros((4, 4))\n\n A[:, 0] = np.array([2 * d * epoch_time,\n (-b - c) * epoch_time,\n 2 * d,\n (-b - c)])\n A[:, 1] = np.array([(-b - c) * epoch_time,\n 2 * a * epoch_time,\n (-b - c),\n 2 * a])\n A[:, 2] = np.array([2 * d * epoch_time ** 2,\n (-b - c) * epoch_time ** 2,\n 2 * d * epoch_time,\n (-b - c) * epoch_time])\n A[:, 3] = np.array([(-b - c) * epoch_time ** 2,\n 2 * a * epoch_time ** 2,\n (-b - c) * epoch_time,\n 2 * a * epoch_time])\n\n astrometric_chi_squared_matrices[epoch] = A\n return astrometric_chi_squared_matrices\n\n\ndef unpack_elements_of_matrix(matrix):\n return matrix.flatten()\n\n\n\"\"\"\nUtility functions for plotting.\n\"\"\"\n\n\ndef plot_fitting_to_astrometric_data(astrometric_data):\n # solving\n fitter = AstrometricFitter(inverse_covariance_matrices=astrometric_data['covariance_matrix'],\n epoch_times=astrometric_data['epoch_delta_t'])\n solution_vector = fitter.fit_line(ra_vs_epoch=astrometric_data['ra'],\n dec_vs_epoch=astrometric_data['dec'])\n # plotting\n plt.figure()\n plt.errorbar(astrometric_data['epoch_delta_t'], astrometric_data['ra'],\n xerr=0, yerr=np.sqrt(astrometric_data['covariance_matrix'][:, 0, 0]),\n fmt='ro', label='RA')\n plt.errorbar(astrometric_data['epoch_delta_t'], astrometric_data['dec'],\n xerr=0, yerr=np.sqrt(astrometric_data['covariance_matrix'][:, 1, 1]),\n fmt='bo', label='DEC')\n continuous_t = np.linspace(np.min(astrometric_data['epoch_delta_t']),\n np.max(astrometric_data['epoch_delta_t']), num=200)\n ra0, dec0, mu_ra, mu_dec = solution_vector\n plt.plot(continuous_t, ra0 + mu_ra * continuous_t, 'r', label='RA fit')\n plt.plot(continuous_t, dec0 + mu_dec * continuous_t, 'b', label='DEC fit')\n plt.xlabel('$\\Delta$ epoch')\n plt.ylabel('RA or DEC')\n plt.legend(loc='best')\n plt.title('RA and DEC linear fit using Covariance Matrices')\n\n\ndef plot_error_ellipse(ax, mu, cov_matrix, color=\"b\"):\n \"\"\"\n Based on\n http://stackoverflow.com/questions/17952171/not-sure-how-to-fit-data-with-a-gaussian-python.\n \"\"\"\n # Compute eigenvalues and associated eigenvectors\n vals, vecs = np.linalg.eigh(cov_matrix)\n\n # Compute \"tilt\" of ellipse using first eigenvector\n x, y = vecs[:, 0]\n theta = np.degrees(np.arctan2(y, x))\n\n # Eigenvalues give length of ellipse along each eigenvector\n w, h = 2 * np.sqrt(vals)\n ellipse = Ellipse(mu, w, h, theta, color=color) # color=\"k\")\n ellipse.set_clip_box(ax.bbox)\n ellipse.set_alpha(0.2)\n ax.add_artist(ellipse)\n return ax\n\n\ndef 
generate_parabolic_astrometric_data(correlation_coefficient=0.0, sigma_ra=0.1, sigma_dec=0.1, num_measurements=20, crescendo=False):\n astrometric_data = {}\n num_measurements = num_measurements\n mu_ra, mu_dec = -1, 2\n acc_ra, acc_dec = -0.1, 0.2\n ra0, dec0 = -30, 40\n epoch_start = 0\n epoch_end = 200\n astrometric_data['epoch_delta_t'] = np.linspace(epoch_start, epoch_end, num=num_measurements)\n astrometric_data['dec'] = dec0 + astrometric_data['epoch_delta_t']*mu_dec + \\\n 1 / 2 * acc_dec * astrometric_data['epoch_delta_t'] ** 2\n astrometric_data['ra'] = ra0 + astrometric_data['epoch_delta_t']*mu_ra + \\\n 1 / 2 * acc_ra * astrometric_data['epoch_delta_t'] ** 2\n cc = correlation_coefficient\n astrometric_data['covariance_matrix'] = np.zeros((num_measurements, 2, 2))\n astrometric_data['covariance_matrix'][:] = np.array([[sigma_ra**2, sigma_ra*sigma_dec*cc],\n [sigma_ra*sigma_dec*cc, sigma_dec**2]])\n if crescendo:\n astrometric_data['covariance_matrix'][:, 0, 0] *= np.linspace(1/10, 4, num=num_measurements)\n astrometric_data['covariance_matrix'][:, 1, 1] *= np.linspace(4, 1/10, num=num_measurements)\n # the inverse-covariance array must exist before the loop fills it, and the loop runs once per measurement\n astrometric_data['inverse_covariance_matrix'] = np.zeros((num_measurements, 2, 2))\n for i in range(num_measurements):\n astrometric_data['inverse_covariance_matrix'][i] = np.linalg.pinv(astrometric_data['covariance_matrix'][i])\n astrometric_data['linear_solution'] = np.array([ra0, dec0, mu_ra, mu_dec])\n return astrometric_data\n\n\nif __name__ == \"__main__\":\n\n data = HipparcosRereductionData()\n data.parse(intermediate_data_directory='/home/mbrandt21/Downloads/Hip2/IntermediateData/resrec',\n star_hip_id='27321')\n scan_angles = data.scan_angle.truncate(after=20)\n multiplier = 20\n covariances = calculate_covariance_matrices(scan_angles, cross_scan_along_scan_var_ratio=multiplier)\n f, ax = plt.subplots()\n for i in range(len(scan_angles)):\n center = data.julian_day_epoch()[i]\n ax = plot_error_ellipse(ax, mu=(center, 0), cov_matrix=covariances[i])\n ax.set_xlim((np.min(data.julian_day_epoch()), np.max(data.julian_day_epoch())))\n ax.set_ylim((-multiplier, multiplier))\n angle = scan_angles.values.flatten()[i]\n ax.plot([center, center -np.sin(angle)], [0, np.cos(angle)], 'k')\n ax.set_title('along scan angle {0} degrees east from the northern equatorial pole'.format(angle*180/np.pi))\n plt.axis('equal')\n\n data = HipparcosRereductionData()\n data.parse(intermediate_data_directory='/home/mbrandt21/Downloads/Hip2/IntermediateData/resrec',\n star_hip_id='49699')\n scan_angles = data.scan_angle\n astrometric_data = generate_parabolic_astrometric_data(correlation_coefficient=0, sigma_ra=5E2,\n sigma_dec=5E2, num_measurements=len(scan_angles))\n astrometric_data['covariance_matrix'] = calculate_covariance_matrices(data.scan_angle, cross_scan_along_scan_var_ratio=10)\n astrometric_data['epoch_delta_t'] = data.julian_day_epoch()\n plot_fitting_to_astrometric_data(astrometric_data)\n\n plt.show()\n","sub_path":"htof/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":16967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"362517678","text":"#https://practice.geeksforgeeks.org/problems/overlapping-intervals/0\n#a = map(int,input().split())\n#a = list(map(int,input().split()))\n\ntest = int(input())\nfor i in range(test):\n n = int(input())\n l = list(map(int,input().split()))\n j = 0\n A = []\n while j <2*n:\n A.append([l[j],l[j+1]])\n j+=2\n A.sort()\n stack = [A[0]]\n for i in range(1,n):\n \n pre = stack.pop()\n a = pre[0]\n b = pre[1]\n \n cur = A[i]\n c = cur[0]\n d = cur[1]\n \n \n if 
a<=c and c<=b:\n if d<=b:\n stack.append([a,b])\n else:\n stack.append([a,d])\n \n else:\n stack.append(pre)\n stack.append(cur)\n\n ans = []\n for i in stack:\n print(str(i[0]),end=\" \")\n print(str(i[1]),end=\" \")\n print()\n \n \n","sub_path":"Array/overlapping-intervals.py","file_name":"overlapping-intervals.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"124505053","text":"def fibonacci():\r\n f_k2 = 0\r\n f_k1 = 1\r\n sum_fib = 0\r\n n = int(input(\"How many Fibonacci numbers do you want? \"))\r\n if n > 0:\r\n print(\"Fibonacci number:\", 0, \"The sum so far is\", 0)\r\n for i in range(n):\r\n f_k = f_k1 + f_k2 #compute the next one\r\n f_k2 = f_k1\r\n f_k1 = f_k\r\n sum_fib = sum_fib + f_k\r\n print(\"Fibonacci number:\", f_k, \"The sum so far is\", sum_fib)\r\n\r\ndef fibonacci_liste():\r\n n = int(input(\"How many Fibonacci numbers do you want? \"))\r\n print(\"Fibonacci number \\tSum \")\r\n print(\"----------------------------\")\r\n f_k2 = 0\r\n f_k1 = 1\r\n sum_fib = 0\r\n for i in range(n):\r\n f_k = f_k1 + f_k2 #compute the next one\r\n f_k2 = f_k1\r\n f_k1 = f_k\r\n sum_fib = sum_fib + f_k\r\n print(f_k,\"\\t\\t\",sum_fib)\r\n\r\n#fibonacci()\r\nfibonacci_liste()\r\n","sub_path":"tdt4110/Øving 3/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"22793471","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Goddy 2019/6/10\n# Desc: \n\nimport numpy as np\n\n\nclass Regression:\n\n @staticmethod\n def rss_error(y_array, y_hat_array):\n return ((y_array - y_hat_array)**2).sum()\n\n @staticmethod\n def lwlr(test_point, x_array, y_array, k=1.0):\n \"\"\"\n Locally Weighted Linear Regression\n :param test_point:\n :param x_array:\n :param y_array:\n :param k:\n :return:\n \"\"\"\n x_matrix = np.mat(x_array)\n y_matrix = np.mat(y_array).T\n m = np.shape(x_matrix)[0]\n # diagonal weight matrix\n weights = np.mat(np.eye(m))\n for j in range(m):\n diff_matrix = test_point - x_matrix[j, :]\n weights[j, j] = np.exp(diff_matrix * diff_matrix.T / (-2.0 * k**2))\n xTx = x_matrix.T * (weights * x_matrix)\n if np.linalg.det(xTx) == 0.0:\n print('this matrix is singular, cannot do inverse')\n return\n # solve (X^T W X) w = X^T W y: use the inverse (.I), not the transpose\n ws = xTx.I * (x_matrix.T * (weights * y_matrix))\n return test_point * ws\n\n @staticmethod\n def lwlr_test(test_array, x_array, y_array, k=1.0):\n m = np.shape(test_array)[0]\n y_hat = np.zeros(m)\n for i in range(m):\n y_hat[i] = Regression.lwlr(test_array[i], x_array, y_array, k)\n return y_hat\n\n @staticmethod\n def stand_regress(x_array, y_array):\n \"\"\"\n optimal w = (X^T · X)^-1 · X^T · y\n \"\"\"\n x_matrix = np.mat(x_array)\n y_matrix = np.mat(y_array).T\n xTx = x_matrix.T * x_matrix\n if np.linalg.det(xTx) == 0.0:\n print('this matrix is singular, cannot do inverse')\n return\n ws = xTx.I * (x_matrix.T * y_matrix)\n return ws\n\n","sub_path":"regression8/regression_core.py","file_name":"regression_core.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"34803207","text":"def dfs(i, j, visit, grd):\n if visit[i][j] or grd[i][j] != 1:\n return\n \n visit[i][j] = True\n dfs(i - 1, j, visit, grd)\n dfs(i + 1, j, visit, grd)\n dfs(i, j + 1, visit, grd)\n dfs(i, j - 1, visit, grd)\n \n\nN = int(input())\ngrid = [[0] + [int(i) for i in input().split()] + [0] for j in 
range(N)]\nvisited = [[False for i in range(N + 2)] for j in range(N + 2)]\ncount = 0\n\ngrid.append([0 for i in range(N + 2)])\ngrid.insert(0, [0 for i in range(N + 2)])\n\nfor i in range(1, N + 1):\n for j in range(1, N + 1):\n if grid[i][j] == 1 and not visited[i][j]:\n count += 1\n dfs(i, j, visited, grid)\n\nprint(count)","sub_path":"Blue 5 - Water Blobs/blobs.py","file_name":"blobs.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"486455449","text":"import tornado.web\nimport tornado.escape\nimport json\nimport http.client\nimport sys\nimport logging\nimport redis\nfrom sqlalchemy import or_\nfrom voluptuous import MultipleInvalid\nfrom tornado import gen\nfrom urllib.parse import urljoin\nfrom datetime import datetime\nfrom raven.contrib.tornado import SentryMixin\nfrom tornado.httpclient import AsyncHTTPClient, HTTPError\nfrom seplis import utils, schemas\nfrom seplis.api.decorators import new_session\nfrom seplis.api.connections import database\nfrom seplis.config import config\nfrom seplis.api import models, exceptions, constants\nfrom seplis.api.base.user import User\nfrom seplis.api.base.pagination import Pagination\nfrom seplis.api.decorators import authenticated\n\nclass Handler(tornado.web.RequestHandler, SentryMixin):\n\n def initialize(self):\n self.access_token = None\n if self.request.body:\n try:\n self.request.body = utils.json_loads(self.request.body)\n except ValueError:\n self.request.body = {}\n else:\n self.request.body = {}\n\n def set_default_headers(self):\n self.set_header('Cache-Control', 'no-cache, must-revalidate')\n self.set_header('Expires', 'Sat, 26 Jul 1997 05:00:00 GMT')\n self.set_header('Content-Type', 'application/json')\n self.set_header('Access-Control-Allow-Origin', '*')\n self.set_header('Access-Control-Allow-Headers', 'Authorization, Content-Type, If-Match, If-Modified-Since, If-None-Match, If-Unmodified-Since, X-Requested-With')\n self.set_header('Access-Control-Allow-Methods', 'GET, POST, PATCH, PUT, DELETE')\n self.set_header('Access-Control-Expose-Headers', 'ETag, Link, X-Total-Count, X-Total-Pages')\n self.set_header('Access-Control-Max-Age', '86400')\n self.set_header('Access-Control-Allow-Credentials', 'true')\n\n def write_error(self, status_code, **kwargs):\n if isinstance(kwargs['exc_info'][1], exceptions.API_exception):\n self.write_object({\n 'code': kwargs['exc_info'][1].code,\n 'message': kwargs['exc_info'][1].message,\n 'errors': kwargs['exc_info'][1].errors,\n 'extra': kwargs['exc_info'][1].extra,\n })\n return \n elif isinstance(kwargs['exc_info'][1], TypeError):\n self.set_status(400)\n self.write_object({\n 'code': None,\n 'message': str(kwargs['exc_info'][1]),\n 'errors': None,\n 'extra': None,\n })\n return\n if hasattr(kwargs['exc_info'][1], 'log_message') and kwargs['exc_info'][1].log_message:\n msg = kwargs['exc_info'][1].log_message\n else:\n msg = http.client.responses[status_code]\n self.write_object({\n 'code': None, \n 'message': msg, \n 'errors': None,\n 'extra': None,\n })\n\n def write_object(self, obj):\n if isinstance(obj, Pagination):\n self.write_pagination(obj)\n return\n self.write(\n utils.json_dumps(obj, indent=4, sort_keys=True),\n )\n\n def write_pagination(self, pagination):\n links = pagination.links_header_format(\n urljoin(\n '{}://{}'.format(\n self.request.protocol,\n self.request.host,\n ), \n self.request.path\n ), \n self.request.query_arguments,\n )\n if links:\n self.set_header('Link', links)\n 
self.set_header('X-Total-Count', pagination.total)\n self.set_header('X-Total-Pages', pagination.pages)\n self.write_object(pagination.records)\n\n @property\n def executor(self):\n return self.application.executor\n\n @property\n def redis(self):\n return database.redis\n\n @gen.coroutine\n def es(self, url, query={}, body={}):\n http_client = AsyncHTTPClient() \n if not url.startswith('/'):\n url = '/'+url\n for arg in query:\n if not isinstance(query[arg], list):\n query[arg] = [query[arg]]\n try:\n response = yield http_client.fetch(\n 'http://{}{}?{}'.format(\n config['api']['elasticsearch'],\n url,\n utils.url_encode_tornado_arguments(query) \\\n if query else '',\n ),\n method='POST' if body else 'GET',\n body=utils.json_dumps(body) if body else None,\n )\n return utils.json_loads(response.body)\n except HTTPError as e:\n try:\n extra = utils.json_loads(e.response.body)\n except:\n extra = {'error': e.response.body.decode('utf-8')}\n raise exceptions.Elasticsearch_exception(\n e.code,\n extra,\n )\n\n def get_current_user(self):\n auth = self.request.headers.get('Authorization', None)\n if not auth:\n return None\n bearer = auth.split(' ')\n if len(bearer) != 2:\n return None\n if bearer[0] != 'Bearer':\n raise tornado.web.HTTPError(400, 'Unrecognized token type')\n self.access_token = bearer[1]\n return User.get_from_token(self.access_token)\n\n def validate(self, schema, *arg, **args):\n try:\n if not isinstance(schema, schemas.Schema): \n schema = schemas.Schema(schema, *arg, **args) \n return schema(self.request.body) \n except MultipleInvalid as e:\n data = []\n for error in e.errors:\n path = '.'.join(str(x) for x in error.path)\n data.append({\n 'field': path,\n 'message': error.msg,\n })\n raise exceptions.Validation_exception(errors=data)\n\n @gen.coroutine\n def log_exception(self, typ, value, tb): \n tornado.web.RequestHandler.log_exception(self, typ, value, tb)\n if isinstance(value, exceptions.Elasticsearch_exception) and \\\n value.status_code != 404:\n pass\n elif isinstance(value, tornado.web.HTTPError) and value.status_code < 500:\n return\n yield gen.Task(\n self.captureException,\n exc_info=(typ, value, tb),\n data=[value.extra] if isinstance(value, exceptions.API_exception) and \\\n value.extra else None,\n )\n\n def get_sentry_user_info(self):\n return {\n 'user': {\n 'is_authenticated': True if self.current_user else False,\n 'info': self.current_user.to_dict() if self.current_user else None,\n }\n }\n\n def get_sentry_data_from_request(self):\n return {\n 'request': {\n 'url': self.request.full_url(),\n 'method': self.request.method,\n 'query_string': self.request.query,\n 'cookies': self.request.headers.get('Cookie', None),\n 'headers': dict(self.request.headers),\n }\n }\n\n def options(self, *args, **kwargs):\n pass\n\n @authenticated(constants.LEVEL_EDIT_USER)\n def check_edit_another_user_right(self):\n pass\n\n def check_user_edit(self, user_id):\n if int(user_id) != self.current_user.id:\n self.check_edit_another_user_right()\n return True\n\n @authenticated(constants.LEVEL_USER)\n def is_logged_in(self):\n pass\n\n def get_append_fields(self, allowed_append_fields):\n append_fields = list(\n filter(None, self.get_argument('append', '').split(','))\n )\n not_allowed = []\n for a in append_fields:\n if a not in allowed_append_fields:\n not_allowed.append(a)\n if not_allowed:\n raise exceptions.Append_fields_not_allowed(not_allowed)\n return append_fields\n\n image_remove_keys = (\n 'relation_type',\n 'relation_id',\n )\n def image_format(self, images):\n 
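The seplis `Handler.get_current_user()` above splits the `Authorization` header and rejects anything that is not a two-part `Bearer` token. A minimal standalone sketch of that parsing pattern (illustrative names, not the seplis API):

```python
# Standalone sketch of the bearer-token parsing used in get_current_user().
def parse_bearer(auth_header):
    """Return the token from 'Authorization: Bearer <token>', or None."""
    if not auth_header:
        return None
    parts = auth_header.split(' ')
    if len(parts) != 2:
        return None
    scheme, token = parts
    if scheme != 'Bearer':
        raise ValueError('Unrecognized token type')  # the handler raises HTTPError(400)
    return token

assert parse_bearer('Bearer abc123') == 'abc123'
assert parse_bearer(None) is None
```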
'''\n        :param images: `dict` or list of `dict`\n        '''\n        if isinstance(images, list):\n            for img in images:\n                utils.keys_to_remove(\n                    self.image_remove_keys,\n                    img\n                )\n        else:\n            utils.keys_to_remove(\n                self.image_remove_keys,\n                images\n            )\n        return images\n\n    episode_remove_keys = (\n        'show_id',\n    )\n    def episode_format(self, episodes):\n        '''\n        :param episodes: `episode()` or list of `episode()`\n        '''\n        if isinstance(episodes, list):\n            for episode in episodes:\n                utils.keys_to_remove(\n                    self.episode_remove_keys,\n                    episode\n                )\n        else:\n            utils.keys_to_remove(\n                self.episode_remove_keys,\n                episodes\n            )\n        return episodes","sub_path":"src/seplis/api/handlers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":9114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"624336702","text":"import os\n\nfrom appJar import gui\n\napp = gui()\n\ndef press(button):\n    if button == \"Start ACRTM\":\n        app.stop()\n        os.system('python run.py')\n    if button == \"Choose Game Music\":\n        app.stop()\n        os.system('python MusicChooser.py')\n\napp.addButtons([\"Start ACRTM\", \"Choose Game Music\"], press)\napp.go()","sub_path":"launcher_windows.py","file_name":"launcher_windows.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"233586451","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport psycopg2\nimport pandas as pd\nimport GlobalContent\nimport numpy as np\nimport datetime as dt\nimport json\n\n\nclass Content:\n    alarm_group_rdsectid = 'alarm_group_rdsectid'\n    alarm_group_node = 'alarm_group'\n    alarm_transfer_json_data = 'alarm_transfer_json_data'\n    pass\n\n\ndef call_key_node(conn, start_date, end_date, start_time, end_time):\n    date_interval = start_date + '~' + end_date\n    time_interval = start_time + '~' + end_time\n    rows = []\n    sql = \"select group_id, key_node from {0} where alarm_date = '{1}' and time_point = '{2}'\"\\\n        .format(GlobalContent.alarm_keynode, date_interval, time_interval)\n    try:\n        cr = conn.cursor()\n        cr.execute(sql)\n        rows = cr.fetchall()\n    except Exception as e:\n        print(\"call_key_node:\", e)\n\n    return rows\n\n\n\ndef call_int_name(conn):\n    rows = []\n    sql = \"select scats_id,inter_name, english_inter_name from {0} where inter_name is not null\".format(GlobalContent.inter_info)\n    try:\n        cr = conn.cursor()\n        cr.execute(sql)\n        rows = cr.fetchall()\n    except Exception as e:\n        print(\"call_int_name:\", e)\n    return rows\n\n\n# Fetch congestion-group road-section info\ndef call_postgres(conn, start_date, end_date, start_time, end_time):\n    date_interval = start_date + '~' + end_date\n    time_interval = start_time + '~' + end_time\n    frame = pd.DataFrame({})\n    match_alarm_records = pd.DataFrame({})\n    try:  # the database connection may time out\n\n        cr = conn.cursor()\n        sql = \"select group_id, rdsectid, up_node, down_node from {0} where alarm_date = '{1}' and time_point ='{2}'\"\\\n            .format(Content.alarm_group_rdsectid, date_interval, time_interval)\n        cr.execute(sql)\n        rows = cr.fetchall()\n        frame = pd.DataFrame(rows)\n        frame.columns = ['group_id', 'rdsectid', 'up_node', 'down_node']\n        # print(frame)\n        cr.close()\n\n    except Exception as e:\n        print('Failed to fetch congestion group data')\n        print(e)\n\n    return frame\n\n\n# Fetch coordination-direction info\ndef call_coor_dir(conn):\n    sql = \"SELECT c.start_time,c.end_time,c.upstream_nodeid,c.downstream_nodeid,c.segment_dir,c.upstream_phasename,d.rdsectid from\" \\\n          \"(SELECT a.*,b.segment_dir \" \\\n          \"FROM(SELECT k.start_time,k.end_time,k.route_id,k.segment_id,k.route_name,t.upstream_nodeid,\"
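The alarm_group_page.py record starting above repeats a fetchall-into-DataFrame pattern with hand-written column lists. A generic sketch of that pattern, taking the column names from the cursor instead (`conn` is any DB-API connection such as psycopg2's; the helper name is illustrative):

```python
# Generic sketch of the fetchall()-into-DataFrame pattern the functions above repeat.
import pandas as pd

def query_to_frame(conn, sql, params=None):
    with conn.cursor() as cur:  # psycopg2 cursors support the context-manager protocol
        cur.execute(sql, params or ())
        cols = [d[0] for d in cur.description]  # column names straight from the driver
        return pd.DataFrame(cur.fetchall(), columns=cols)
```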
\\\n          \"t.downstream_nodeid,t.upstream_phasename \" \\\n          \"FROM(select m.start_time,m.end_time,m.route_id,m.route_name,n.segment_id \" \\\n          \"from coor_route m ,coor_route_segment_rel n where m.route_id = n.route_id) K,coor_phase_diff t \" \\\n          \"where k.segment_id=t.segment_id) a LEFT JOIN coor_route_segment b on a.segment_id = b.segment_id) c\" \\\n          \"\tleft join pe_tobj_roadsect d on c.upstream_nodeid=d.new_up_nod and c.downstream_nodeid=d.new_down_n \" \\\n          \"ORDER BY c.route_id\"\n    # date_interval = start_date + '~' + end_date\n    # time_interval = start_time + '~' + end_time\n    frame = pd.DataFrame({})\n    try:  # the database connection may time out\n        # conn = psycopg2.connect(database=GlobalContent.pg_database72_research['database'],\n        #                         user=GlobalContent.pg_database72_research['user'],\n        #                         password=GlobalContent.pg_database72_research['password'],\n        #                         host=GlobalContent.pg_database72_research['host'],\n        #                         port=GlobalContent.pg_database72_research['port'])\n        cr = conn.cursor()\n        cr.execute(sql)\n        rows = cr.fetchall()\n        # print(rows)\n        rows_list = []\n        for i in rows:\n            i = list(i)\n            rows_list.append(i)\n            i[0] = dt.time.strftime(i[0], '%H:%M:%S')\n            i[1] = dt.time.strftime(i[1], '%H:%M:%S')\n        # print(rows)\n        frame = pd.DataFrame(rows_list)\n        frame.columns = ['start_time', 'end_time', 'up_node', 'down_node', 'coor_dir','phase', 'rdsectid']\n        cr.close()\n\n    except Exception as e:\n        print(e)\n\n    # print(frame)\n    return frame\n\n\n# Fetch node configuration info\ndef call_node_inf(conn):\n    sql = \"select nodeid,systemid from pe_tobj_node \"\n    rows_list = []\n    try:  # the database connection may time out\n        # conn = psycopg2.connect(database=GlobalContent.pg_database72_research['database'],\n        #                         user=GlobalContent.pg_database72_research['user'],\n        #                         password=GlobalContent.pg_database72_research['password'],\n        #                         host=GlobalContent.pg_database72_research['host'],\n        #                         port=GlobalContent.pg_database72_research['port'])\n        cr = conn.cursor()\n        cr.execute(sql)\n        rows = cr.fetchall()\n        rows_list = []\n        for i in rows:\n            i = list(i)\n            rows_list.append(i)\n        # print(rows_list)\n        cr.close()\n    except Exception as e:\n        print(e)\n    # print(frame)\n    return rows_list\n\n\n# Fetch congestion-group node info\ndef call_group_node(conn, start_date, end_date, start_time, end_time):\n    date_interval = start_date + '~' + end_date\n    time_interval = start_time + '~' + end_time\n    frame = pd.DataFrame({})\n    rows_list = []\n    sql = \"select group_id ,scats_id,alarm_times from {0} where alarm_date = '{1}' and time_point ='{2}'\"\\\n        .format(Content.alarm_group_node, date_interval, time_interval)\n    try:\n        cr = conn.cursor()\n        cr.execute(sql)\n        rows = cr.fetchall()\n        # print(rows)\n        rows_list = []\n        for i in rows:\n            i = list(i)\n            rows_list.append(i)\n        cr.close()\n\n    except Exception as e:\n        print(\"call_group_node:\", e)\n    # print(frame)\n    return rows_list\n\n\n# Remove redundancy and build the relation data\ndef deleta_excess(data_list1, noid_inf):\n\n    up_node = []\n    relation = []\n    down_node = []\n    node_combine = []\n    for i in range(len(data_list1)):\n        if ([data_list1[i][0], data_list1[i][1]] not in node_combine) or \\\n                ([data_list1[i][1], data_list1[i][0]] not in node_combine):\n            up_node.append(data_list1[i][0])\n            down_node.append(data_list1[i][1])\n            node_combine.append([data_list1[i][0], data_list1[i][1]])\n            fromId = data_list1[i][0]\n            toId = data_list1[i][1]\n            if data_list1[i][2] == 1:\n                l2 = True\n            else:\n                l2 = False\n            if data_list1[i][3] == 1:\n                r2 = True\n            else:\n                r2 = False\n            if data_list1[i][4] == 1:\n                l1 = True\n            else:\n                l1 = False\n            if data_list1[i][5] == 1:\n                r1 = True\n            else:\n                r1 = False\n            for j in noid_inf:\n                if fromId == j[1]:\n                    fromId = j[0]\n
if toId == j[1]:\n                    toId = j[0]\n            whichLine = {'l1': l1, 'l2': l2, 'r1': r1, 'r2': r2}\n            busy_rdsect = {'fromId': fromId, 'toId': toId, 'whichLine': whichLine}\n            relation.append(busy_rdsect)\n    # print(relation)\n    return relation\n\n\n# Match alarm counts and build the data list\ndef alarm_times_match(group_data_node,noid_inf, group_key_node, int_name, int_order):\n    data = []\n    data_order = []\n    # print(group_data_node)\n    # print(noid_inf)\n    # print(int_name)\n    # print(group_key_node)\n    # input()\n    for i in range(len(group_data_node)):\n        isKey = False\n        name = '无'\n        scats_id = group_data_node[i][1]\n        alarm_times = group_data_node[i][2]\n        nodeid = scats_id\n        for j in noid_inf:\n            if group_data_node[i][1] == j[1]:\n                nodeid = j[0]\n            else:\n                pass\n        for j in group_key_node:\n            if scats_id == j[1]:\n                isKey = True\n        for j in int_name:\n\n            if scats_id == j[0]:\n                if j[2] is not None:\n                    name = j[2]\n                else:\n                    name = j[1]\n        alarm_times ={'id': nodeid, 'x': None, 'y': None, 'value': scats_id, 'isKey': isKey, 'name': name}\n        data.append(alarm_times)\n    for i in int_order:\n        for j in data:\n            if j['value']== i:\n                data_order.append(j)\n            else:\n                pass\n    # print(data_order)\n    return data_order\n\n\n# Send data in JSON format\ndef json_send(conn, data_type, sub_id, var_data, start_date, end_date, start_time, end_time):\n    start_date = str(start_date.replace('-', ''))\n    end_date = str(end_date.replace('-', ''))\n    sql_delete = \"delete from {0} where data_type ='{1}' and sub_id ='{2}' and start_date ='{3}' and end_date ='{4}' \" \\\n                 \"and start_time='{5}' and end_time ='{6}'\"\\\n        .format(Content.alarm_transfer_json_data, data_type, sub_id, start_date, end_date, start_time, end_time)\n    sql_send = \"insert into {0}(data_type,sub_id,start_date,end_date,start_time,end_time,json_data) \" \\\n               \"values(%s,%s,%s,%s,%s,%s,%s)\".format(Content.alarm_transfer_json_data)\n    try:  # the database connection may time out\n        # conn = psycopg2.connect(database=GlobalContent.pg_database72_research['database'],\n        #                         user=GlobalContent.pg_database72_research['user'],\n        #                         password=GlobalContent.pg_database72_research['password'],\n        #                         host=GlobalContent.pg_database72_research['host'],\n        #                         port=GlobalContent.pg_database72_research['port'])\n        cr = conn.cursor()\n    except Exception as e:\n        print('json_send', e)\n\n    else:\n        # print(sql_delete)\n        # print(start_date, end_date)\n        try:\n            cr.execute(sql_send, (data_type, sub_id, start_date, end_date, start_time, end_time, var_data))\n            conn.commit()\n        except psycopg2.IntegrityError:\n            conn.commit()\n            cr.execute(sql_delete)\n            conn.commit()\n            cr.execute(sql_send, (data_type, sub_id, start_date, end_date, start_time, end_time, var_data))\n            print('json_send: duplicate data; deleted and re-inserted')\n        conn.commit()\n        cr.close()\n\n\n# Fetch road-section geometry info\ndef call_rdsect_geom(start_date, end_date, start_time, end_time):\n    date_interval = start_date + '~' + end_date\n    time_interval = start_time + '~' + end_time\n    frame = pd.DataFrame({})\n    rows_list = []\n    rows_node_list = []\n    rows_rdsect_list = []\n    sql_node = \"select group_id, scats_id, ST_AsText(geom) from {0} where alarm_date = '{1}' and time_point ='{2}'\" \\\n        .format(Content.alarm_group_node, date_interval, time_interval)\n    sql_rdsect = \"select group_id, rdsectid, ST_AsText(geom) from {0} where alarm_date = '{1}' and time_point ='{2}'\"\\\n        .format(Content.alarm_group_rdsectid, date_interval, time_interval)\n\n    try:  # the database connection may time out\n        conn = psycopg2.connect(database=GlobalContent.pg_database72_research['database'],\n                                user=GlobalContent.pg_database72_research['user'],\n                                password=GlobalContent.pg_database72_research['password'],\n
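`json_send()` above emulates an upsert by catching `psycopg2.IntegrityError`, deleting the old row and re-inserting. On PostgreSQL 9.5+ the same effect fits in one statement; a hedged sketch, assuming a UNIQUE constraint over the six key columns (the constraint itself is not shown in the record):

```python
# Single-statement alternative to json_send()'s delete-and-retry; assumes a
# UNIQUE constraint on (data_type, sub_id, start_date, end_date, start_time, end_time).
UPSERT_SQL = """
insert into alarm_transfer_json_data
    (data_type, sub_id, start_date, end_date, start_time, end_time, json_data)
values (%s, %s, %s, %s, %s, %s, %s)
on conflict (data_type, sub_id, start_date, end_date, start_time, end_time)
do update set json_data = excluded.json_data
"""

def json_upsert(conn, key_and_payload):
    with conn.cursor() as cur:
        cur.execute(UPSERT_SQL, key_and_payload)
    conn.commit()
```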
host=GlobalContent.pg_database72_research['host'],\n                                port=GlobalContent.pg_database72_research['port'])\n        cr = conn.cursor()\n        cr.execute(sql_node)\n        rows_node = cr.fetchall()\n\n        cr.execute(sql_rdsect)\n        rows_rdsect = cr.fetchall()\n        # print(rows)\n        rows_node_list = []\n        for i in rows_node:\n            i = list(i)\n            rows_node_list.append(i)\n        rows_rdsect_list = []\n        for i in rows_rdsect:\n            i = list(i)\n            rows_rdsect_list.append(i)\n        cr.close()\n        conn.close()\n    except Exception as e:\n        print(e)\n    # print(rows_node_list)\n    # print(rows_rdsect_list)\n    return rows_node_list, rows_rdsect_list\n\n\n# Send geometry info as JSON\ndef geom_json(conn, start_date, end_date, start_time, end_time, noid_inf):\n    if len(noid_inf)!=0:\n        node_geom, rdsect_geom = call_rdsect_geom(start_date, end_date, start_time, end_time)\n        group_id = []\n        all_geom = []\n        var_data = []\n        [group_id.append(node[0]) for node in node_geom]\n        group_id = set(group_id)\n        group_id = sorted(group_id)\n        # print(group_id)\n        for i in group_id:\n            scats_id = []\n            node_id = []\n            group_node = []\n            group_rdsect = []\n            for j in node_geom:\n                if j[0] == i:\n                    scats_id.append(j[1])\n                    group_node.append(j[2])\n            for m in rdsect_geom:\n                if m[0] == i:\n\n                    group_rdsect.append(m[2])\n            i = i.replace('_', '')\n            for m in scats_id:\n                for n in noid_inf:\n                    if n[1] == m:\n                        node_id.append(n[0])\n\n            group_data = {'Name': i, 'Data': {\"NodeId\": node_id, \"NodeGeom\": group_node, \"RdsectGeom\": group_rdsect}}\n            # id = i[0:5]+i[-1:]\n            all_geom.append(group_data)\n        var_data = {\"GeomData\": all_geom}\n        var_data = json.dumps(var_data, ensure_ascii=False)\n    else:\n        var_data = []\n\n    data_type = \"2\"\n    sub_id = \"brain\"\n    json_send(conn, data_type, sub_id, var_data, start_date, end_date, start_time, end_time)\n    # print(var_data)\n\n\n# Main routine\ndef main(start_date, end_date, start_time, end_time, int_order):\n    # print(int_order)\n    dt_start = dt.datetime.strptime(start_time,\"%H:%M:%S\")\n    dt_end = dt.datetime.strptime(end_time, \"%H:%M:%S\")\n    # call_postgres('2018-03-05', '2018-03-15', '07:00:00', '09:00:00')\n    group_data_node = []\n    group_data_road = []\n    try:\n        conn = psycopg2.connect(database=GlobalContent.pg_database72_research['database'],\n                                user=GlobalContent.pg_database72_research['user'],\n                                password=GlobalContent.pg_database72_research['password'],\n                                host=GlobalContent.pg_database72_research['host'],\n                                port=GlobalContent.pg_database72_research['port'])\n    except Exception as e:\n        print('conn:', e)\n    else:\n        # Fetch congestion-group node info\n        group_data_node = call_group_node(conn, start_date, end_date, start_time, end_time)\n        # Fetch congestion-group road-section info\n        group_data_road = call_postgres(conn, start_date, end_date, start_time, end_time)\n        # Fetch key-node info\n        group_key_node = call_key_node(conn, start_date, end_date, start_time, end_time)\n        int_name = call_int_name(conn)\n\n    if len(group_data_road) > 0:\n        group_id_list = np.array(group_data_road['group_id']).tolist()\n        group_id_list = set(group_id_list)\n        group_id_list = sorted(group_id_list)\n        # print(group_id_list)\n        noid_inf = call_node_inf(conn)\n        coor_dir = call_coor_dir(conn)\n        # Match the coordination strategies active in the time window\n        match_coor = coor_dir[((coor_dir['start_time'] > start_time) & (coor_dir['start_time'] < end_time)) |\n                              ((coor_dir['end_time'] > start_time) & (coor_dir['end_time'] < end_time)) |\n                              ((coor_dir['start_time'] < start_time) & (coor_dir['end_time'] > end_time))]\n        # print(match_coor)\n        # print(group_data_road)\n\n        coor_dir_up = []\n        coor_dir_down = []\n        for indexs in match_coor.index:\n            up_nodeid = match_coor.loc[indexs].values[2]\n            down_nodeid = match_coor.loc[indexs].values[3]\n
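`geom_json()` above rescans the full `node_geom` and `rdsect_geom` lists once per group id. An equivalent single-pass grouping, sketched with a `defaultdict` (function name is illustrative):

```python
# Single-pass equivalent of the per-group scans in geom_json(): bucket the
# (group_id, item_id, geom) rows once instead of rescanning for every group.
from collections import defaultdict

def bucket_by_group(rows):
    buckets = defaultdict(list)
    for group_id, item_id, geom in rows:
        buckets[group_id].append((item_id, geom))
    return dict(buckets)
```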
coor_rdsectid = match_coor.loc[indexs].values[6]\n            for j in noid_inf:\n                if up_nodeid == j[0]:\n                    coor_dir_up.append(j[1])\n                if down_nodeid == j[0]:\n                    coor_dir_down.append(j[1])\n        # print(group_id_list, int_order)\n        for k in group_id_list:\n            match_order = []\n            for j in int_order:\n                if j[0] == k.replace('_',''):\n                    match_order = j[1]\n            # print(match_order, k)\n            # input()\n            match_data = group_data_road[group_data_road['group_id'] == k]\n            match_node = []\n            for j in group_data_node:\n                if j[0] == k:\n                    match_node.append(j)\n            up_node = np.array(match_data['up_node']).tolist()\n            down_node = np.array(match_data['down_node']).tolist()\n            rdsectid = np.array(match_data['rdsectid']).tolist()\n\n            # print(coor_dir)\n            # match_coor = coor_dir[((coor_dir['start_time'] > start_time) & (coor_dir['start_time'] < end_time)) |\n            #                       ((coor_dir['end_time'] > start_time) & (coor_dir['end_time'] < end_time)) |\n            #                       ((coor_dir['start_time'] < start_time) & (coor_dir['end_time'] > end_time))]\n            # # print(match_coor)\n            # # print(group_data_road)\n            # road_dir = []\n            # coor_dir_up = []\n            # coor_dir_down = []\n            # for indexs in match_coor.index:\n            #     up_nodeid = match_coor.loc[indexs].values[2]\n            #     down_nodeid = match_coor.loc[indexs].values[3]\n            #     coor_rdsectid = match_coor.loc[indexs].values[3]\n            #     for j in noid_inf:\n            #         if up_nodeid == j[0]:\n            #             coor_dir_up.append(j[1])\n            #         if down_nodeid == j[0]:\n            #             coor_dir_down.append(j[1])\n            road_dir = []\n            for i in range(len(up_node)):\n                forward = 1\n                backward = 0\n                coor_forward = 0\n                coor_backward = 0\n                for j in range(len(down_node)):\n                    if up_node[i] == down_node[j] and up_node[j] == down_node[i]:\n                        backward = -1\n                for m in range(len(coor_dir_up)):\n                    if up_node[i] == coor_dir_up[m] and down_node[i] == coor_dir_down[m]:\n                        coor_forward = 1\n                    if down_node[i] == coor_dir_up[m] and up_node[i] == coor_dir_down[m]:\n                        coor_backward = 1\n                road_dir.append([up_node[i], down_node[i], forward, backward, coor_forward, coor_backward])\n\n            data = alarm_times_match(match_node, noid_inf, group_key_node, int_name, match_order)\n            relation = deleta_excess(road_dir, noid_inf)\n            data_type = \"1\"\n            # Convert the date format\n            k = k.replace('_', '')\n            sub_id = k\n            var_data = {'relation': relation, 'data': data}\n\n            var_data = json.dumps(var_data, ensure_ascii=False)\n            # print(var_data)\n            json_send(conn, data_type, sub_id, var_data, start_date, end_date, start_time, end_time)\n        geom_json(conn, start_date, end_date, start_time, end_time, noid_inf)\n        # print(var_data)\n    else:\n        geom_json(conn, start_date, end_date, start_time, end_time, [])\n        print(\"No group data; exiting\")\n        pass\n    conn.close()\n\n\nif __name__ == '__main__':\n    # geom_json('2018-04-01', '2018-04-10', '07:00:00', '09:00:00')\n    # call_rdsect_geom('2018-03-05', '2018-03-15', '07:00:00', '09:00:00')\n    main('2018-05-28', '2018-05-28', '06:00:00', '06:15:00', [])\n    # call_coor_dir('2018-03-05', '2018-03-15', '07:00:00', '09:00:00')\n    # call_node_inf()\n    # call_coor_dir()","sub_path":"xjc_pyfile/proj/proj/python_project/ali_alarm/2018-7-27back/alarm_group_page.py","file_name":"alarm_group_page.py","file_ext":"py","file_size_in_byte":19936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"598959930","text":"\"\"\"RobotLightP controller.\"\"\"\n\n# You may need to import some classes of the controller module.
# Ex:\n# from controller import Robot, Motor, DistanceSensor\nfrom controller import Robot\nfrom controller import Motor\nfrom controller import Emitter\nfrom controller import Receiver\nfrom controller import DistanceSensor\nfrom controller import LightSensor\nfrom controller import Compass\nfrom random import randint\nimport math\nimport struct \n\nif __name__ == '__main__':\n\n    # create the Robot instance.\n    robot = Robot()\n    \n    # get the time step of the current world.\n    timestep = int(robot.getBasicTimeStep())\n    \n    # initialize motors\n    wheels = []\n    wheelsNames = ['wheel1', 'wheel2', 'wheel3', 'wheel4']\n    for i in range(4):\n        wheels.append(robot.getDevice(wheelsNames[i]))\n        wheels[i].setPosition(float('inf'))\n        wheels[i].setVelocity(0.0)\n    \n    # initialize emitter \n    emm = robot.getDevice('trans')\n    \n    # Number of group members, excluding this robot (i.e. n-1)\n    k = 24\n    \n    # initialize receivers \n    rec = [] \n    recNames = ['rec1', 'rec2', 'rec3', 'rec4', 'rec5', 'rec6', 'rec7', 'rec8', 'rec9', 'rec10', 'rec11', 'rec12', 'rec13', 'rec14', 'rec15', 'rec16', 'rec17', 'rec18', 'rec19', 'rec20', 'rec21', 'rec22', 'rec23', 'rec24']\n    for i in range(k):\n        rec.append(robot.getDevice(recNames[i]))\n        rec[i].enable(timestep)\n    \n    # initialize distance sensor \n    ds = robot.getDevice('ds')\n    ds.enable(timestep)\n    \n    # initialize light sensors\n    ls = []\n    lsNames = ['ls1', 'ls2', 'ls3', 'ls4']\n    for i in range(4):\n        ls.append(robot.getDevice(lsNames[i]))\n        ls[i].enable(timestep)\n    \n    # initialize compass \n    com = robot.getDevice('com')\n    com.enable(timestep)\n    \n    # Initial values for the motors\n    leftSpeed = 0\n    rightSpeed = 0 \n    \n    # Variables controlling obstacle avoidance\n    avoidObstacleCounter = 0\n    counter = 0\n    j = 0\n    p = 0\n    # Main loop:\n    # - perform simulation steps until Webots is stopping the controller\n    while robot.step(timestep) != -1:\n        print (robot.getName()) \n        # Find the maximum among the light sensors\n        light = []\n        for i in range(4):\n            light.append(ls[i].getValue())\n            #print (light[i])\n        max = light[0]\n        for i in range(4):\n            if light[i] > max:\n                max = light[i]\n        \n        # Compute the bearing toward the light source from the four light sensors.\n        # Pick the sensor with the highest reading.
# Compare it with its neighbours and pick the second-strongest sensor.\n        # Compute the clockwise turn toward the light source.\n        # Compute the desired bearing.\n        print(max)\n        error = 0\n        light_max = 0\n        light_min = 0\n        if max != 0:\n            a = 0\n            b = 0 \n            if max == light[0]:\n                a = light[0] - light[3] \n                b = light[0] - light[1]\n                if a < b and light[3] != 0: \n                    error = (light[0]*90)/(light[0]+light[3])-45\n                \n                elif b < a and light[1] != 0:\n                    error = 45 + (light[1]*90)/(light[0]+light[1])\n                \n                else: \n                    error = 45 \n            \n            elif max == light[1]:\n                a = light[1] - light[0]\n                b = light[1] - light[2]\n                if a <= b and light[0] != 0:\n                    error = 45 + (light[1]*90)/(light[0]+light[1])\n                \n                elif b < a and light[2] != 0:\n                    error = 135 + (light[2]*90)/(light[2]+light[1])\n                \n                else: \n                    error = 135\n            \n            elif max == light[2]:\n                a = light[2] - light[1] \n                b = light[2] - light[3] \n                if a <= b and light[1] != 0: \n                    error = 135 + (light[2]*90)/(light[2]+light[1])\n                \n                elif b < a and light[3] != 0:\n                    error = 225 + (light[3]*90)/(light[2]+light[3])\n                \n                else: \n                    error = 225\n            \n            elif max == light[3]:\n                a = light[3] - light[2] \n                b = light[3] - light[0]\n                if a <= b and light[2] != 0: \n                    error = 225 + (light[3]*90)/(light[2]+light[3])\n                \n                elif b < a and light[0] != 0:\n                    error = 315 + (light[0]*90)/(light[0]+light[3])\n                \n                else: \n                    error = 315 \n        \n        print(\"error=\", error)\n        # print (robot.getName())\n        # print(\"bearing =\", bearing)\n        # print(\"dbearing =\", dbearing)\n        \n        # Confidence q in the course, based on the light sensors\n        # and the heading sensor; a_q is a weighting coefficient,\n        # d is the distance sensor reading.\n        q = 0\n        a_q = 0.5\n        d = ds.getValue()\n        if light[0]+light[3] == 0:\n            q = 0\n        else:\n            q = (1-a_q)*(1 - abs((light[0]-light[3])/(light[0]+light[3]))) + a_q*(d/1000) \n        \n        # Send the message to the neighbours\n        message = struct.pack(\"dd\",error,q)\n        emm.send(message)\n        \n        # Receive messages\n        k_t = k\n        bearingn = [0] * k\n        for i in range(k):\n            bearingn [i] = [0] * 2 \n            #print (bearingn [i][1])\n        for i in range (k):\n            if rec[i].getQueueLength() > 0:\n                message = rec[i].getData()\n                dataList = struct.unpack(\"dd\",message)\n                bearingn [i][0] = dataList[0]\n                bearingn [i][1] = dataList[1]\n                rec[i].nextPacket()\n                # print(\"Hello\")\n                print(bearingn[i][0], bearingn[i][1])\n            else:\n                k_t -= 1 \n                bearingn [i][1] = -1\n        print (\"k_t = \", k_t) \n        # Compute sigma\n        alpha = 0.8\n        deltaq = 0\n        sigma_t = 0\n        for i in range (k):\n            if bearingn [i][1] != -1:\n                deltaq += bearingn[i][1] - q\n        print (\"deltaq= \", deltaq)\n        if k_t == 0:\n            sigma_t = 0 \n        else: \n            sigma_t = (alpha*deltaq)/k_t\n        \n        # Compute gamma^i_t \n        if q == 0:\n            q=0.01\n        gamma_t = 1/(q+sigma_t)\n        \n        # Compute the sum of differences\n        cos_delta_sum = 0\n        sin_delta_sum = 0\n        cos_error = math.cos(math.radians(error))\n        sin_error = math.sin(math.radians(error)) \n        \n        for i in range (k):\n            if bearingn [i][1] != -1:\n                cos_bearingn = math.cos(math.radians(bearingn [i][0]))\n                sin_bearingn = math.sin(math.radians(bearingn [i][0])) \n                cos_delta_sum += cos_bearingn*bearingn [i][1] - cos_error*q\n                sin_delta_sum += sin_bearingn*bearingn [i][1] - sin_error*q\n        # Compute the group course dbearingG from the group data;\n        # alpha is a coefficient, p is the confidence in the course recomputed from the group\n        \n        dbearingG = 0\n        if k_t == 0:\n            dbearingG = error\n        else: \n            cos_dbearingG = cos_error*(1-(sigma_t*gamma_t))+(alpha*gamma_t*cos_delta_sum)/k_t \n            sin_dbearingG = sin_error*(1-(sigma_t*gamma_t))+(alpha*gamma_t*sin_delta_sum)/k_t\n            \n            print(\"cos\", cos_dbearingG)\n            \n            if cos_dbearingG < -1:\n
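The controller interpolates the bearing between two adjacent light sensors by weighting their axes with the readings. A hedged standalone sketch of that idea; the sensor axes and the 90-degree spacing are assumptions for illustration, not taken from the robot's actual geometry:

```python
# Reading-weighted blend of two adjacent sensor axes, as used by the branch
# chain above (axis values here are illustrative).
def blend_bearing(axis_a, reading_a, axis_b, reading_b):
    """axis_b is assumed to sit 90 degrees clockwise of axis_a."""
    total = reading_a + reading_b
    if total == 0:
        return axis_a  # no signal; fall back to the first axis
    return (axis_a * reading_a + axis_b * reading_b) / total

# equal readings land halfway between the axes
assert blend_bearing(0, 1.0, 90, 1.0) == 45.0
```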
cos_dbearingG = -1\n            if cos_dbearingG > 1:\n                cos_dbearingG = 1\n            if cos_dbearingG > 0 and sin_dbearingG > 0:\n                dbearingG = math.degrees(math.acos(cos_dbearingG))\n            elif cos_dbearingG > 0 and sin_dbearingG < 0:\n                dbearingG = 360 - math.degrees(math.acos(cos_dbearingG))\n            elif cos_dbearingG < 0 and sin_dbearingG > 0:\n                dbearingG = math.degrees(math.acos(cos_dbearingG)) \n            elif cos_dbearingG < 0 and sin_dbearingG < 0:\n                dbearingG = 360 - math.degrees(math.acos(cos_dbearingG))\n        \n        print(\"errorG=\", dbearingG)\n        \n        # Set the motion\n        if dbearingG <= 5 or dbearingG >= 355: \n            leftSpeed = 3.14 \n            rightSpeed = 3.14\n        elif dbearingG <= 175:\n            leftSpeed = 3.14\n            rightSpeed = 3.14*(1-dbearingG/180) \n        elif dbearingG >= 185: \n            leftSpeed = 3.14*((dbearingG/180)-1)\n            rightSpeed = 3.14 \n        else:\n            counter = 50\n        \n        # Obstacle avoidance\n        print(\"d\",d)\n        if d <= 600 and avoidObstacleCounter == 0:\n            avoidObstacleCounter = 1\n            if light[0] == light[1] == light[2] == light[3]:\n                p = randint(0,1)\n            elif max == light[0] or max == light[1]: \n                p = 0 # turn right\n            elif max == light[2] or max == light[3]:\n                p = 1 # turn left \n        \n        \n        if avoidObstacleCounter != 0:\n            if d > 600:\n                avoidObstacleCounter = 0\n            else:\n                avoidObstacleCounter -= 1\n            \n            if p == 1:\n                leftSpeed = -2\n                rightSpeed = 2 \n            elif p == 0:\n                leftSpeed = 2\n                rightSpeed = -2\n        \n        if counter != 0:\n            leftSpeed = -2\n            rightSpeed = 2\n            counter -= 1 \n        \n        print (\"LeftSpeed\", leftSpeed)\n        print (\"RightSpeed\", rightSpeed) \n        \n        # Send the values to the motors\n        wheels[0].setVelocity(leftSpeed)\n        wheels[1].setVelocity(rightSpeed)\n        wheels[2].setVelocity(leftSpeed)\n        wheels[3].setVelocity(rightSpeed)\n        \n        # Read the sensors:\n        # Enter here functions to read sensor data, like:\n        # val = ds.getValue()\n        \n        # Process sensor data here.\n        \n        # Enter here functions to send actuator commands, like:\n        # motor.setPosition(10.0)\n        pass\n    \n    # Enter here exit cleanup code.\n    ","sub_path":"controllers/RobotLightP_4_robota/RobotLightP_4_robota.py","file_name":"RobotLightP_4_robota.py","file_ext":"py","file_size_in_byte":10977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"635517315","text":"import collections\n\nfrom .interface import ConcreteImpl, TupleTy, RecordType, NativeTy, PointerTy, StackTy, BoolTy\nfrom common import fresh_name\nimport predicates\n\ndef is_numeric(ty):\n    return ty.lower() in (\"int\", \"integer\", \"long\", \"short\", \"float\", \"double\", \"btscalar\")\n\nclass VolumeTree(ConcreteImpl):\n\n    class VolumeSpec(object):\n        def __init__(self, lts, gts):\n            self.lts = lts\n            self.gts = gts\n\n    @staticmethod\n    def infer_volume(fields, predicate):\n        clauses = list(predicates.break_conj(predicate))\n        lts = []\n        gts = []\n        types = set()\n        for c in clauses:\n            if (c.lhs.name in fields) == (c.rhs.name in fields):\n                return\n            if c.rhs.name in fields:\n                c = c.flip()\n            if not is_numeric(fields[c.lhs.name]):\n                return\n            types.add(fields[c.lhs.name])\n            if c.op in (predicates.Lt, predicates.Le):\n                lts.append((c.lhs.name, c.rhs.name))\n            if c.op in (predicates.Gt, predicates.Ge):\n                gts.append((c.lhs.name, c.rhs.name))\n        # print(\"; \".join(str(c) for c in clauses))\n        # print(lts)\n        # print(gts)\n        if len(lts) != len(gts):\n            return\n        if len(types) != 1:\n            return\n        yield VolumeTree.VolumeSpec(lts, gts) # todo: permutations?\n\n    def __init__(self, spec, fields, predicate, stack_iteration=False):\n        self.stack_iteration = stack_iteration\n        self.spec = spec\n        self.field_types = fields\n        self.the_type = 
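The controller reconstructs `dbearingG` from its cosine and sine with a four-quadrant case split. `math.atan2` collapses the whole chain into one call that handles every sign combination; an equivalent sketch, normalized to the same [0, 360) convention:

```python
# One-call equivalent of the quadrant case split above.
import math

def angle_from_components(cos_val, sin_val):
    return math.degrees(math.atan2(sin_val, cos_val)) % 360.0

a = 300.0
assert abs(angle_from_components(math.cos(math.radians(a)),
                                 math.sin(math.radians(a))) - a) < 1e-9
```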
NativeTy(fields[spec.lts[0][0]])\n self.predicate = predicate\n self.root = fresh_name(\"root\")\n self.left_ptr = fresh_name(\"left\")\n self.right_ptr = fresh_name(\"right\")\n self.leaf_ptr = fresh_name(\"leaf\")\n self.stack_name = fresh_name(\"stack\")\n self.prev_name = fresh_name(\"prev\")\n self.cursor_name = fresh_name(\"cursor\")\n self.parent_ptr = fresh_name(\"parent\")\n self.record_parent_ptr = fresh_name(\"parent\")\n self.remap = { f : fresh_name(f) for f, _ in (self.spec.lts + self.spec.gts) }\n\n myfields = [f for f, _ in spec.lts] + [f for f, _ in spec.gts]\n self.node_type = TupleTy(collections.OrderedDict(\n [(self.remap[f], NativeTy(fields[f])) for f in myfields]))\n self.node_type.fields[self.left_ptr] = PointerTy(self.node_type)\n self.node_type.fields[self.right_ptr] = PointerTy(self.node_type)\n self.node_type.fields[self.parent_ptr] = PointerTy(self.node_type)\n self.node_type.fields[self.leaf_ptr] = RecordType()\n self.node_type = PointerTy(self.node_type)\n\n def __str__(self):\n return \"VolumeTree({}, si={})\".format(\", \".join(\"{}-{}\".format(f1, f2) for (f1, _), (f2, _) in zip(self.spec.lts, self.spec.gts)), self.stack_iteration)\n def __repr__(self):\n return self.__str__()\n\n def fields(self):\n return ((self.root, self.node_type),)\n def construct(self, gen, parent_structure):\n return gen.set(parent_structure.field(gen, self.root), gen.null_value())\n def needs_var(self, v):\n return v in ([v for _, v in self.spec.lts] + [v for _, v in self.spec.gts])\n def state(self):\n if self.stack_iteration:\n return [\n (self.stack_name, StackTy(self.node_type)),\n (self.prev_name, RecordType()),\n (self.cursor_name, RecordType())]\n return [(self.prev_name, RecordType()), (self.cursor_name, RecordType())]\n def private_members(self):\n return [(self.record_parent_ptr, self.node_type)]\n def gen_query(self, gen, qvars, parent_structure):\n field = parent_structure.field\n if self.stack_iteration:\n stk = fresh_name(\"stack\")\n proc = gen.decl(stk, StackTy(self.node_type), gen.new_stack(self.node_type))\n proc += gen.stack_size_hint(stk, \"100\")\n proc += gen.if_true(gen.not_true(gen.is_null(field(gen, self.root))))\n proc += gen.stack_push(stk, field(gen, self.root))\n proc += gen.endif()\n cursor = fresh_name(\"cursor\")\n prev = fresh_name(\"prev\")\n proc += gen.decl(cursor, RecordType(), gen.null_value())\n proc += gen.decl(prev, RecordType(), gen.null_value())\n proc += self._gen_advance(gen, stk, cursor, prev)\n return proc, [stk, gen.null_value(), cursor]\n cursor = fresh_name(\"cursor\")\n proc = gen.decl(cursor, RecordType(), gen.null_value())\n proc += gen.if_true(gen.not_true(gen.is_null(field(gen, self.root))))\n p, m = self.find_first(gen, field(gen, self.root))\n proc += p\n proc += gen.set(cursor, m)\n proc += gen.endif()\n return proc, [gen.null_value(), cursor]\n def gen_query_one(self, gen, qvars, parent_structure):\n return self.find_first(gen, parent_structure.field(gen, self.root))\n def gen_empty(self, gen, qvars):\n if self.stack_iteration:\n return [gen.new_stack(self.node_type), gen.null_value(), gen.null_value()]\n return [gen.null_value(), gen.null_value()]\n def gen_find_any(self, gen, parent_structure):\n cursor = fresh_name(\"cursor\")\n result = fresh_name(\"result\")\n proc = gen.decl(cursor, self.node_type, parent_structure.field(gen, self.root))\n proc += gen.while_true(gen.both(gen.not_true(gen.is_null(cursor)), self.is_leaf(gen, cursor)))\n proc += gen.set(cursor, gen.get_field(cursor, self.left_ptr))\n proc += 
gen.endif()\n proc += gen.decl(result, RecordType(), gen.ternary(gen.is_null(cursor), gen.null_value(), gen.get_field(cursor, self.leaf_ptr)))\n return proc, result\n def auxtypes(self):\n return (self.node_type.ty,)\n def distance(self, gen, record, node, remap={}):\n e = \"0\"\n for (f1, _), (f2, _) in zip(self.spec.lts, self.spec.gts):\n e = gen.add(e,\n gen.abs(gen.sub(\n gen.add(gen.get_field(node, self.remap[f1]), gen.get_field(node, self.remap[f2])),\n gen.add(remap.get(f1, gen.get_field(record, f1)), remap.get(f2, gen.get_field(record, f2))))))\n return e\n def select_child(self, gen, parent, record, remap={}):\n return gen.ternary(\n gen.lt(self.the_type,\n self.distance(gen, record, gen.get_field(parent, self.right_ptr), remap=remap),\n self.distance(gen, record, gen.get_field(parent, self.left_ptr), remap=remap)),\n gen.get_field(parent, self.right_ptr),\n gen.get_field(parent, self.left_ptr))\n def merge_volumes(self, gen, n1, n2, into):\n changed = fresh_name(\"changed\")\n new_value = fresh_name(\"new_value\")\n t = self.the_type # TODO\n proc = gen.decl(changed, BoolTy(), gen.false_value())\n proc += gen.decl(new_value, t)\n for f, _ in self.spec.lts:\n f = self.remap[f]\n proc += gen.set(new_value, gen.min(t, gen.get_field(n1, f), gen.get_field(n2, f)))\n proc += gen.set(changed, gen.either(changed, gen.not_true(gen.same(gen.get_field(into, f), new_value))))\n proc += gen.set(gen.get_field(into, f), new_value)\n for f, _ in self.spec.gts:\n f = self.remap[f]\n proc += gen.set(new_value, gen.max(t, gen.get_field(n1, f), gen.get_field(n2, f)))\n proc += gen.set(changed, gen.either(changed, gen.not_true(gen.same(gen.get_field(into, f), new_value))))\n proc += gen.set(gen.get_field(into, f), new_value)\n return proc, changed\n def replace_child(self, gen, parent, old_child, new_child):\n proc = gen.if_true(gen.same(gen.get_field(parent, self.right_ptr), old_child))\n proc += gen.set(gen.get_field(parent, self.right_ptr), new_child)\n proc += gen.else_true()\n proc += gen.set(gen.get_field(parent, self.left_ptr), new_child)\n proc += gen.endif()\n return proc\n def volume_contains(self, gen, large, small):\n e = gen.true_value()\n for (f1, _), (f2, _) in zip(self.spec.lts, self.spec.gts):\n f1 = self.remap[f1]\n f2 = self.remap[f2]\n e = gen.both(e, gen.le(self.the_type, gen.get_field(large, f1), gen.get_field(small, f1)))\n e = gen.both(e, gen.ge(self.the_type, gen.get_field(large, f2), gen.get_field(small, f2)))\n return e\n def find_insertion_point(self, gen, x, root, remap={}):\n ip = fresh_name(\"insertion_point\")\n proc = gen.decl(ip, self.node_type, root)\n proc += gen.while_true(gen.not_true(gen.either(\n gen.is_null(ip),\n self.is_leaf(gen, ip))))\n proc += gen.set(ip, self.select_child(gen, ip, x, remap=remap))\n proc += gen.endwhile()\n return proc, ip\n def recompute_volume(self, gen, n):\n return self.merge_volumes(gen, gen.get_field(n, self.left_ptr), gen.get_field(n, self.right_ptr), into=n)\n def recompute_volumes_recursively(self, gen, n):\n cursor = fresh_name(\"cursor\")\n proc = gen.decl(cursor, self.node_type, n)\n proc += gen.while_true(gen.not_true(gen.is_null(cursor)))\n p, changed = self.recompute_volume(gen, cursor)\n proc += p\n proc += gen.if_true(gen.not_true(changed)) + gen.break_loop() + gen.endif()\n proc += gen.set(cursor, gen.get_field(cursor, self.parent_ptr))\n proc += gen.endwhile()\n return proc\n def gen_insert(self, gen, x, parent_structure):\n wrapper = fresh_name(\"leaf\")\n proc = gen.decl(wrapper, self.node_type, 
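`distance()` and `select_child()` above emit code that compares, per axis, the absolute difference of (min + max) sums, i.e. twice the L1 distance between box centers. A plain-Python rendering of that heuristic, assuming boxes as mappings from axis name to a `(lo, hi)` pair (the representation is illustrative, not the generated struct):

```python
# Plain-Python sketch of the select_child() heuristic: compare 2x the L1
# distance between box centers and descend toward the closer child.
def center_distance(box, rec):
    # both arguments map axis -> (lo, hi)
    return sum(abs(sum(box[a]) - sum(rec[a])) for a in box)

def select_child(left_box, right_box, rec):
    return 'right' if center_distance(right_box, rec) < center_distance(left_box, rec) else 'left'

left = {'x': (0.0, 2.0)}
right = {'x': (10.0, 12.0)}
assert select_child(left, right, {'x': (11.0, 11.0)}) == 'right'
```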
gen.alloc(self.node_type.ty, []))\n for f,v in self.spec.lts + self.spec.gts:\n proc += gen.set(gen.get_field(wrapper, self.remap[f]), gen.get_field(x, f))\n proc += gen.set(gen.get_field(wrapper, self.left_ptr), gen.null_value())\n proc += gen.set(gen.get_field(wrapper, self.right_ptr), gen.null_value())\n proc += gen.set(gen.get_field(wrapper, self.parent_ptr), gen.null_value())\n proc += gen.set(gen.get_field(wrapper, self.leaf_ptr), x)\n proc += gen.set(gen.get_field(x, self.record_parent_ptr), wrapper)\n\n # No root? Put it there.\n proc += gen.if_true(gen.is_null(parent_structure.field(gen, self.root)))\n proc += gen.set(parent_structure.field(gen, self.root), wrapper)\n proc += gen.else_true()\n\n # Descend to the right spot.\n p, sibling = self.find_insertion_point(gen, x, parent_structure.field(gen, self.root))\n proc += p\n\n # Create a new node to contain both wrapper and sibling\n node = fresh_name(\"newnode\")\n proc += gen.decl(node, self.node_type, gen.alloc(self.node_type.ty, []))\n proc += gen.set(gen.get_field(node, self.left_ptr), wrapper)\n proc += gen.set(gen.get_field(node, self.right_ptr), sibling)\n proc += gen.set(gen.get_field(node, self.parent_ptr), gen.null_value())\n proc += gen.set(gen.get_field(node, self.leaf_ptr), gen.null_value())\n proc += gen.set(gen.get_field(wrapper, self.parent_ptr), node)\n p, _ = self.merge_volumes(gen, wrapper, sibling, into=node)\n proc += p\n\n parent = fresh_name(\"parent\")\n proc += gen.decl(parent, self.node_type, gen.get_field(sibling, self.parent_ptr))\n proc += gen.set(gen.get_field(sibling, self.parent_ptr), node)\n\n # Sibling is a leaf and the root\n proc += gen.if_true(gen.is_null(parent))\n proc += gen.set(parent_structure.field(gen, self.root), node)\n\n # Sibling is a leaf and has a parent\n proc += gen.else_true()\n proc += gen.set(gen.get_field(node, self.parent_ptr), parent)\n proc += self.replace_child(gen, parent, old_child=sibling, new_child=node)\n proc += gen.while_true(gen.not_true(gen.is_null(parent)))\n p, changed = self.merge_volumes(gen, parent, node, into=parent)\n proc += p\n proc += gen.if_true(gen.not_true(changed)) + gen.break_loop() + gen.endif()\n proc += gen.set(parent, gen.get_field(parent, self.parent_ptr))\n proc += gen.endwhile()\n\n proc += gen.endif()\n proc += gen.endif()\n return proc\n\n def gen_remove(self, gen, x, parent_structure):\n x_node = fresh_name(\"x_node\")\n x_parent = fresh_name(\"x_parent\")\n x_grandparent = fresh_name(\"x_grandparent\")\n\n # x is the root!\n proc = gen.decl(x_node, self.node_type, gen.get_field(x, self.record_parent_ptr))\n proc += gen.if_true(gen.same(x_node, parent_structure.field(gen, self.root)))\n proc += gen.free(self.node_type, x_node)\n proc += gen.set(parent_structure.field(gen, self.root), gen.null_value())\n proc += gen.else_true()\n\n proc += gen.decl(x_parent, self.node_type, gen.get_field(x_node, self.parent_ptr))\n sibling = fresh_name(\"sibling\")\n proc += gen.decl(sibling, self.node_type, gen.ternary(\n gen.same(gen.get_field(x_parent, self.left_ptr), x_node),\n gen.get_field(x_parent, self.right_ptr),\n gen.get_field(x_parent, self.left_ptr)))\n\n # x's parent is the root!\n proc += gen.if_true(gen.same(x_parent, parent_structure.field(gen, self.root)))\n proc += gen.set(parent_structure.field(gen, self.root), sibling)\n proc += gen.set(gen.get_field(sibling, self.parent_ptr), gen.null_value())\n\n # x's parent is not the root!\n proc += gen.else_true()\n proc += gen.decl(x_grandparent, self.node_type, gen.get_field(x_parent, 
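`gen_insert()` above refits ancestors by merging volumes upward and stopping as soon as `merge_volumes` reports no change. A plain-Python sketch of that union-with-change-tracking, under the same illustrative axis -> `(lo, hi)` box representation as the previous sketch:

```python
# Plain-Python equivalent of merge_volumes(): per axis, take the min of the
# lower bounds and the max of the upper bounds, and report whether the
# enclosing box actually grew (used to cut the upward refit loop short).
def merge_volumes(a, b, into):
    changed = False
    for axis in into:
        lo = min(a[axis][0], b[axis][0])
        hi = max(a[axis][1], b[axis][1])
        if into[axis] != (lo, hi):
            changed = True
        into[axis] = (lo, hi)
    return changed

parent = {'x': (0.0, 1.0)}
assert merge_volumes({'x': (0.0, 1.0)}, {'x': (0.5, 3.0)}, parent)
assert parent['x'] == (0.0, 3.0)
```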
self.parent_ptr))\n proc += self.replace_child(gen, x_grandparent, x_parent, sibling)\n proc += gen.set(gen.get_field(sibling, self.parent_ptr), x_grandparent)\n proc += self.recompute_volumes_recursively(gen, x_grandparent)\n proc += gen.endif()\n\n proc += gen.free(self.node_type, x_node)\n proc += gen.free(self.node_type, x_parent)\n proc += gen.endif()\n return proc\n\n def gen_remove_in_place(self, gen, parent_structure, iterator):\n proc = self.gen_remove(gen, iterator.field(gen, self.prev_name), parent_structure)\n return proc, iterator.field(gen, self.prev_name)\n def gen_update(self, gen, fields, x, remap, parent_structure):\n if not any(f in ([ff for ff,_ in self.spec.lts] + [ff for ff,_ in self.spec.gts]) for f in remap):\n return \"\" # no effect!\n\n x_node = fresh_name(\"x_node\")\n x_parent = fresh_name(\"x_parent\")\n\n proc = gen.comment(\"update procedure for {}\".format(remap))\n proc += gen.decl(x_node, self.node_type, gen.get_field(x, self.record_parent_ptr))\n proc += gen.decl(x_parent, self.node_type, gen.get_field(x_node, self.parent_ptr))\n\n # copy values up into the wrapper node\n for f, v in remap.items():\n proc += gen.set(gen.get_field(x_node, self.remap[f]), v)\n\n # if x is the only thing in the tree, no problem! Otherwise...\n proc += gen.if_true(gen.not_true(gen.is_null(x_parent)))\n\n # save a reference to x_node's old sibling\n old_sibling = fresh_name(\"old_sibling\")\n proc += gen.decl(old_sibling, self.node_type, gen.ternary(\n gen.same(gen.get_field(x_parent, self.left_ptr), x_node),\n gen.get_field(x_parent, self.right_ptr),\n gen.get_field(x_parent, self.left_ptr)))\n\n # Find the insertion point: the new sibling for x_node.\n # We will replace this node with x_parent, and move this as a child of\n # x_parent in the tree.\n p, new_sibling = self.find_insertion_point(gen, x, parent_structure.field(gen, self.root), remap=remap)\n proc += p\n\n new_grandparent = fresh_name(\"new_grandparent\")\n proc += gen.decl(new_grandparent, self.node_type, gen.get_field(new_sibling, self.parent_ptr))\n\n # If the found location is not x_node or old_sibling, then we need to\n # actually do the transform.\n proc += gen.if_true(gen.not_true(gen.same(x_parent, new_grandparent)))\n x_grandparent = fresh_name(\"x_grandparent\")\n proc += gen.decl(x_grandparent, self.node_type, gen.get_field(x_parent, self.parent_ptr))\n proc += gen.set(gen.get_field(old_sibling, self.parent_ptr), x_grandparent)\n proc += self.replace_child(gen, x_grandparent, x_parent, old_sibling)\n proc += gen.set(gen.get_field(x_parent, self.parent_ptr), new_grandparent)\n proc += self.replace_child(gen, new_grandparent, new_sibling, x_parent)\n proc += gen.set(gen.get_field(new_sibling, self.parent_ptr), x_parent)\n proc += self.replace_child(gen, x_parent, old_sibling, new_sibling)\n proc += self.recompute_volumes_recursively(gen, x_grandparent)\n p, _ = self.recompute_volume(gen, x_parent)\n proc += p\n proc += gen.endif()\n\n # Expand x's chain to include the new value\n proc += self.recompute_volumes_recursively(gen, new_grandparent)\n\n proc += gen.endif()\n return proc\n\n def is_leaf(self, gen, node):\n return gen.not_true(gen.is_null(gen.get_field(node, self.leaf_ptr)))\n def query_holds(self, gen, record):\n qvars = [(v, NativeTy(self.field_types[f])) for f, v in self.spec.lts] + [(v, NativeTy(self.field_types[f])) for f, v in self.spec.gts]\n fts = [(f, NativeTy(t)) for f, t in self.field_types.items()]\n return gen.predicate(fts, qvars, self.predicate, record)\n def 
intersects_query(self, gen, node):\n result = gen.true_value()\n for f, v in self.spec.lts:\n result = gen.both(result, gen.le(NativeTy(self.field_types[f]), gen.get_field(node, self.remap[f]), v))\n for f, v in self.spec.gts:\n result = gen.both(result, gen.ge(NativeTy(self.field_types[f]), gen.get_field(node, self.remap[f]), v))\n return result\n def find_first(self, gen, tree_root):\n cursor = fresh_name(\"cursor\")\n out = fresh_name(\"first\")\n\n proc = gen.decl(cursor, self.node_type, tree_root)\n proc += gen.decl(out, RecordType(), gen.null_value())\n\n proc += gen.while_true(gen.true_value())\n\n # greedy descent until you find a leaf\n proc += gen.while_true(gen.not_true(self.is_leaf(gen, cursor)))\n proc += gen.if_true(self.intersects_query(gen, gen.get_field(cursor, self.left_ptr)))\n proc += gen.set(cursor, gen.get_field(cursor, self.left_ptr))\n proc += gen.else_if(self.intersects_query(gen, gen.get_field(cursor, self.right_ptr)))\n proc += gen.set(cursor, gen.get_field(cursor, self.right_ptr))\n proc += gen.else_true()\n proc += gen.break_loop()\n proc += gen.endif()\n proc += gen.endwhile()\n\n # if we are at a leaf AND the leaf matches, we're done!\n proc += gen.if_true(gen.both(\n self.is_leaf(gen, cursor),\n self.query_holds(gen, gen.get_field(cursor, self.leaf_ptr))))\n proc += gen.set(out, gen.get_field(cursor, self.leaf_ptr))\n proc += gen.break_loop()\n proc += gen.endif()\n\n # otherwise, ascend until we can descend to the right and then do so\n proc += gen.while_true(gen.not_true(gen.same(cursor, tree_root)))\n parent = fresh_name(\"parent\")\n proc += gen.decl(parent, self.node_type, gen.get_field(cursor, self.parent_ptr))\n proc += gen.if_true(gen.both(\n gen.same(cursor, gen.get_field(parent, self.left_ptr)),\n self.intersects_query(gen, gen.get_field(parent, self.right_ptr))))\n proc += gen.set(cursor, gen.get_field(parent, self.right_ptr))\n proc += gen.break_loop()\n proc += gen.endif()\n proc += gen.set(cursor, parent)\n proc += gen.endwhile()\n\n # if we are stuck at the root, then we're done!\n proc += gen.if_true(gen.same(cursor, tree_root))\n proc += gen.break_loop()\n proc += gen.endif()\n\n proc += gen.endwhile()\n\n return proc, out\n def gen_has_next(self, gen, parent_structure, iterator):\n return \"\", gen.not_true(gen.is_null(iterator.field(gen, self.cursor_name)))\n def gen_current(self, gen, parent_structure, iterator):\n return \"\", iterator.field(gen, self.cursor_name)\n def _gen_advance(self, gen, stack, cursor, prev):\n node = fresh_name(\"node\")\n proc = gen.set(prev, cursor)\n proc += gen.set(cursor, gen.null_value())\n proc += gen.while_true(gen.not_true(gen.stack_is_empty(stack)))\n proc += gen.decl(node, self.node_type, gen.stack_peek(stack))\n proc += gen.stack_pop(stack)\n\n proc += gen.if_true(self.is_leaf(gen, node))\n\n # TODO: determine when this if-check is necessary! 
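`find_first()` above (and `gen_advance()` below) emit a stackless tree walk: greedily descend into the first child whose volume intersects the query, and on failure climb until the current node is a left child whose right sibling intersects. A compact plain-Python rendering; `Node` is a stand-in for the generated struct, not part of the original source:

```python
# Plain-Python rendering of the stackless traversal the generated code performs.
class Node:
    def __init__(self, leaf=None, left=None, right=None):
        self.leaf, self.left, self.right, self.parent = leaf, left, right, None
        for c in (left, right):
            if c is not None:
                c.parent = self

def first_match(root, intersects, matches):
    node = root
    while True:
        # greedy descent into the first intersecting child
        while node.leaf is None:
            if intersects(node.left):
                node = node.left
            elif intersects(node.right):
                node = node.right
            else:
                break
        if node.leaf is not None and matches(node.leaf):
            return node.leaf
        # ascend until an unvisited right sibling intersects
        while node is not root:
            parent = node.parent
            if node is parent.left and intersects(parent.right):
                node = parent.right
                break
            node = parent
        if node is root:
            return None

# two-leaf demo: only the right leaf matches
root = Node(left=Node(leaf='a'), right=Node(leaf='b'))
assert first_match(root, lambda n: True, lambda r: r == 'b') == 'b'
```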
It isn't for\n # Bullet, but it _is_ in general.\n # proc += gen.if_true(self.query_holds(gen, gen.get_field(node, self.leaf_ptr)))\n proc += gen.set(cursor, gen.get_field(node, self.leaf_ptr))\n proc += gen.break_loop()\n # proc += gen.endif()\n\n proc += gen.else_true()\n\n if True:\n l = fresh_name(\"left\")\n r = fresh_name(\"right\")\n\n proc += gen.decl(l, self.node_type, gen.get_field(node, self.left_ptr))\n proc += gen.decl(r, self.node_type, gen.get_field(node, self.right_ptr))\n\n for n in (l, r):\n proc += gen.if_true(self.intersects_query(gen, n))\n proc += gen.stack_push(stack, n)\n proc += gen.endif()\n else:\n\n proc += gen.if_true(self.intersects_query(gen, node))\n proc += gen.stack_push(stack, gen.get_field(node, self.left_ptr))\n proc += gen.stack_push(stack, gen.get_field(node, self.right_ptr))\n proc += gen.endif()\n\n proc += gen.endif()\n\n proc += gen.endwhile()\n return proc\n def gen_advance(self, gen, parent_structure, iterator):\n prev = iterator.field(gen, self.prev_name)\n cursor = iterator.field(gen, self.cursor_name)\n\n if self.stack_iteration:\n return self._gen_advance(gen, iterator.field(gen, self.stack_name), cursor, prev)\n\n proc = gen.comment(\"advance\")\n proc += gen.set(prev, cursor)\n cursor = fresh_name(\"cursor\")\n proc += gen.decl(cursor, self.node_type, gen.get_field(cursor, self.record_parent_ptr))\n proc += gen.while_true(gen.true_value())\n\n # ascend until we can descend to the right and then do so\n proc += gen.while_true(gen.not_true(gen.is_null(gen.get_field(cursor, self.parent_ptr))))\n parent = fresh_name(\"parent\")\n proc += gen.decl(parent, self.node_type, gen.get_field(cursor, self.parent_ptr))\n proc += gen.if_true(gen.both(\n gen.same(cursor, gen.get_field(parent, self.left_ptr)),\n self.intersects_query(gen, gen.get_field(parent, self.right_ptr))))\n proc += gen.set(cursor, gen.get_field(parent, self.right_ptr))\n proc += gen.break_loop()\n proc += gen.endif()\n proc += gen.set(cursor, parent)\n proc += gen.endwhile()\n\n # if we are stuck at the root, then we're done!\n proc += gen.if_true(gen.is_null(gen.get_field(cursor, self.parent_ptr)))\n proc += gen.set(cursor, gen.null_value())\n proc += gen.break_loop()\n proc += gen.endif()\n\n # find the first matching node in this subtree, if it exists\n p, m = self.find_first(gen, cursor)\n proc += p\n\n # we found the min!\n proc += gen.if_true(gen.not_true(gen.is_null(m)))\n proc += gen.set(cursor, m)\n proc += gen.break_loop()\n proc += gen.endif()\n\n proc += gen.endwhile()\n return proc\n def check_rep(self, gen, parent_structure):\n stk = fresh_name(\"stack\")\n node = fresh_name(\"node\")\n record = fresh_name(\"record\")\n\n proc = gen.decl(stk, StackTy(self.node_type), gen.new_stack(self.node_type))\n proc += gen.if_true(gen.not_true(gen.is_null(parent_structure.field(gen, self.root))))\n proc += gen.stack_push(stk, parent_structure.field(gen, self.root))\n proc += gen.endif()\n proc += gen.while_true(gen.not_true(gen.stack_is_empty(stk)))\n proc += gen.decl(node, self.node_type, gen.stack_peek(stk))\n proc += gen.stack_pop(stk)\n\n proc += gen.if_true(gen.is_null(gen.get_field(node, self.parent_ptr)))\n proc += gen.assert_true(gen.same(node, parent_structure.field(gen, self.root)))\n proc += gen.else_true()\n proc += gen.assert_true(gen.is_null(gen.get_field(gen.get_field(node, self.parent_ptr), self.leaf_ptr)))\n proc += gen.assert_true(gen.either(\n gen.same(node, gen.get_field(gen.get_field(node, self.parent_ptr), self.left_ptr)),\n gen.same(node, 
gen.get_field(gen.get_field(node, self.parent_ptr), self.right_ptr))))\n proc += gen.endif()\n\n proc += gen.if_true(gen.is_null(gen.get_field(node, self.leaf_ptr)))\n for ptr in (self.left_ptr, self.right_ptr):\n proc += gen.assert_true(gen.not_true(gen.is_null(gen.get_field(node, ptr))))\n proc += gen.assert_true(gen.same(node, gen.get_field(gen.get_field(node, ptr), self.parent_ptr)))\n proc += gen.stack_push(stk, gen.get_field(node, ptr))\n for f,_ in self.spec.lts:\n proc += gen.assert_true(gen.same(\n gen.get_field(node, self.remap[f]),\n gen.min(\n self.the_type,\n gen.get_field(gen.get_field(node, self.left_ptr), self.remap[f]),\n gen.get_field(gen.get_field(node, self.right_ptr), self.remap[f]))))\n for f,_ in self.spec.gts:\n proc += gen.assert_true(gen.same(\n gen.get_field(node, self.remap[f]),\n gen.max(\n self.the_type,\n gen.get_field(gen.get_field(node, self.left_ptr), self.remap[f]),\n gen.get_field(gen.get_field(node, self.right_ptr), self.remap[f]))))\n proc += gen.else_true()\n proc += gen.decl(record, RecordType(), gen.get_field(node, self.leaf_ptr))\n proc += gen.assert_true(gen.same(node, gen.get_field(record, self.record_parent_ptr)))\n for f,_ in (self.spec.lts + self.spec.gts):\n proc += gen.assert_true(gen.same(gen.get_field(node, self.remap[f]), gen.get_field(record, f)))\n proc += gen.endif()\n\n proc += gen.endwhile()\n return proc\n","sub_path":"src/structures/volume_tree.py","file_name":"volume_tree.py","file_ext":"py","file_size_in_byte":26528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"391217865","text":"# %%\nfrom bs4 import BeautifulSoup\nimport sys\nimport os\nimport shutil\nimport ntpath\n\n# Changing the working directory to script location\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n\ndef check_locations(dir_local, dir_web,):\n\n # Setting the directories for the web and local folders\n list_dir_local = os.listdir(dir_local)\n list_dir_web = os.listdir(dir_web)\n\n # Finding the missing files\n files_missing = [i for i in list_dir_local if i not in list_dir_web]\n\n # Cheking if the directories are synced, generating a list for location of missing files\n if len(files_missing) != 0:\n dir_files_missing = [dir_local + \"/\" + i for i in files_missing]\n\n print('The missing files were:')\n print(*files_missing, sep='\\n')\n print(\"\\n\")\n print('dir_files_missing was generated')\n print(\"\\n\")\n\n return dir_files_missing\n else:\n dir_files_missing = []\n print('The directories were synced')\n print('dir_files_missing is empty')\n print(\"\\n\")\n return dir_files_missing\n\n\ndef filename_from_path(path):\n head, tail = ntpath.split(path)\n return tail or ntpath.basename(head)\n\n\ndef move_missing(dir_local, dir_web, dir_files_missing):\n\n if len(dir_files_missing) != 0:\n\n dir_files_moved = []\n\n for i in dir_files_missing:\n # Move new files to dir_web\n shutil.copy2(i, dir_web)\n\n # Append new directories to dir_files_moved\n file_name = filename_from_path(i)\n file_dir = f\"{dir_web}/{file_name}\"\n dir_files_moved.append(file_dir)\n\n print(\"succesfully moved the following files\")\n print(*dir_files_missing, sep='\\n')\n print(\"\\n\")\n print(\"new file paths are\")\n print(*dir_files_moved, sep='\\n')\n print(\"\\n\")\n return dir_files_moved\n\n else:\n print('Missing files list was empty' + \"\\n\" + \"No files were moved\")\n return\n\ndef generate_tags_to_insert(list_of_files_to_add, html_file):\n\n tags_to_insert = []\n\n if list_of_files_to_add != 
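`check_locations()` in the updater record above finds missing files with a list-comprehension membership test, which is O(n*m) over two lists. A set difference gives the same missing-file list in one pass (sorted here since set order is arbitrary):

```python
# One-pass alternative to the list-membership scan in check_locations().
def missing_files(local_files, web_files):
    return sorted(set(local_files) - set(web_files))

assert missing_files(['a.jpg', 'b.jpg'], ['a.jpg']) == ['b.jpg']
```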
None:\n\n print(f\"{len(list_of_files_to_add)} tags will be generated\")\n\n for i in list_of_files_to_add:\n soup = BeautifulSoup(html_file, 'html.parser')\n # Preapare the tag to be inserted\n tag_new_a = soup.new_tag(\"a\", attrs={\n 'class': 'column', 'href': i, 'data-lightbox': \"20_10\"})\n \n tag_new_img = soup.new_tag(\"img\", attrs={\n 'class': 'column', 'src': i, 'alt': \"\"})\n \n tag_new_a.insert(1, tag_new_img)\n\n # Append the new tags to list\n tags_to_insert.append(tag_new_a)\n\n print(f\"sucessfully generated {len(tags_to_insert)} tags\")\n return tags_to_insert\n\n else:\n print('0 tags generated')\n return tags_to_insert\n\ndef generate_new_html(html_file, number_of_images_on_page, number_of_images_on_div, list_of_tags_to_add):\n \n # Finding the number of divs on the webpage\n number_of_divs = number_of_images_on_page / number_of_images_on_div\n number_of_divs = int(number_of_divs)\n\n # Creating a master list to itterate\n tags = list_of_tags_to_add\n\n # Finding the divs for insertion\n with open(html_file, \"r\") as file:\n soup = BeautifulSoup(file, 'html.parser')\n\n # Find empty div\n for i in range(1, 12):\n tag_checked = soup.find(id=f\"row-{i}\")\n\n # Fill the div until it has desired number of images\n while len(tag_checked.contents) < number_of_images_on_div:\n \n if tags:\n # Insert the a tag into the empty tag\n tag_checked.insert(0,tags[-1])\n\n # Remove the inserted tag from the master list\n tags.pop()\n print(tag_checked.prettify)\n print(\"\\n\")\n else:\n break\n \n return soup\n\ndef owerite_html(old_html_file, new_html_file):\n\n with open(old_html_file, \"w\") as file:\n file.write(str(new_html_file))\n\n# Local directory\ndir_local = \"../../testdir\"\n\n# Web directory\ndir_web = \"Assets/Dailies\"\n\n# HTML file\nhtml_file = \"dailies.html\"\n\nimages_page = 33\nimages_div = 3\n\ndir_files_missing = check_locations(dir_local=dir_local, dir_web=dir_web)\ndir_files_moved = move_missing(dir_local=dir_local, dir_web=dir_web, dir_files_missing=dir_files_missing)\ntags_to_add = generate_tags_to_insert(dir_files_moved, html_file)\n\nnew_html = generate_new_html(html_file, images_page, images_div, tags_to_add)\n\nowerite_html(html_file, new_html)\n# %%","sub_path":"updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":4684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"91526737","text":"# -*- coding: Utf-8 -*-\n\nfrom enum import Enum\nfrom PyQt5.QtCore import QObject, QDataStream, QIODevice, QByteArray, QVariant, pyqtSignal\nfrom PyQt5.QtNetwork import QTcpSocket, QHostAddress, QAbstractSocket\n\nfrom static.GVARS import GV, warning\n\n\nclass PACClient(QObject):\n\n signalReady = pyqtSignal()\n signalDisconnected = pyqtSignal()\n signalFail = pyqtSignal()\n signalBArrayRecieved = pyqtSignal(QByteArray)\n signalSyncStart = pyqtSignal()\n\n signal_order_set = pyqtSignal(list)\n\n def __init__(self, parent=None):\n QObject.__init__(self, parent)\n\n self.tcpSocket = QTcpSocket(self)\n\n self._total = 0\n self._tailSize = 0\n self._readBlockSize = 0\n self.block_size = 0\n self._tail = QByteArray()\n\n self.__create_connects()\n\n def __create_connects(self):\n self.tcpSocket.readyRead.connect(self.read_tcp_data)\n self.tcpSocket.error.connect(self.on_error)\n self.tcpSocket.connected.connect(self.on_connected)\n self.tcpSocket.disconnected.connect(self.on_disconnected)\n self.signalBArrayRecieved.connect(self.__b_array_recieved)\n\n def connect_to_pac(self):\n 
self.tcpSocket.connectToHost(QHostAddress(GV.settings.values.ip()), GV.settings.values.port(), QIODevice.ReadWrite)\n\n def hand_shake(self):\n block = QByteArray()\n\n out = QDataStream(block, QIODevice.WriteOnly)\n out.setVersion(QDataStream.Qt_4_0)\n\n out << QVariant(MessageType.hand_shake)\\\n << QVariant(GV.settings.values.regiment())\\\n << QVariant(GV.settings.values.division())\\\n << QVariant(GV.settings.values.battery())\\\n << QVariant(GV.settings.values.assignment())\n self.write_to_socket(block)\n\n def write_to_socket(self, block):\n four_bytes = self.__int_to_bytes(block.size() + 4)\n block.prepend(four_bytes)\n self.tcpSocket.write(block)\n\n def on_disconnected(self):\n self._total = 0\n self._tailSize = 0\n self._readBlockSize = 0\n self.block_size = 0\n self._tail = QByteArray()\n self.signalDisconnected.emit()\n\n def on_connected(self):\n self.signalReady.emit()\n self.hand_shake()\n\n def on_error(self, error):\n string = u\"Ошибка соединения - \"\n if error == QAbstractSocket.RemoteHostClosedError:\n string += u\"ПАК разорвал соединение\"\n elif error == QAbstractSocket.HostNotFoundError:\n string += u\"Сервер не найдет. Проверьте правильность указанных настроек - ip адрес и порт.\"\n elif error == QAbstractSocket.ConnectionRefusedError:\n string += u\"Не удалось установить соединение.\"\n else:\n string += u\"При попытке соединения возникла следующая ошибка: %s.\" % self.tcpSocket.errorString()\n warning.show(string)\n GV.report.add_new_report(GV.report.ReportType.fail, string)\n self.signalFail.emit()\n\n def on_msg(self):\n #print (\"on_msg\")\n instr = QDataStream(self.tcpSocket.readAll())\n instr.setVersion(QDataStream.Qt_4_0)\n\n a = QVariant()\n b = QVariant()\n\n instr >> a >> b\n a=a.toInt()[0]\n b=b.toInt()[0]\n if b==1:\n signal = SIGNAL('taskSet_'+str(a)+'()')\n else:\n signal = SIGNAL('taskUnSet_' + str(a) + '()')\n\n self.emit(signal)\n self.task_completed(a)\n\n def send_list_tasks(self, ltasks):\n block = QByteArray()\n out = QDataStream(block, QIODevice.WriteOnly)\n out.setVersion(QDataStream.Qt_4_0)\n out << QVariant(MessageType.tasks_list) << ltasks\n self.write_to_socket(block)\n\n def send_list_imitators(self, limits):\n block = QByteArray()\n out = QDataStream(block, QIODevice.WriteOnly)\n out.setVersion(QDataStream.Qt_4_0)\n out << QVariant(MessageType.imitators_list) << limits\n self.write_to_socket(block)\n\n def task_completed(self, idTask):\n block = QByteArray()\n out = QDataStream(block, QIODevice.WriteOnly)\n out.setVersion(QDataStream.Qt_4_0)\n out << QVariant(u'TaskCompleted') << QVariant(idTask)\n self.write_to_socket(block)\n\n def __b_array_recieved(self, b_array):\n message_type = QVariant()\n stream = QDataStream(b_array)\n stream.setVersion(QDataStream.Qt_4_0)\n stream >> message_type\n message_type = message_type.toInt()[0]\n #print \"message_type\", message_type\n if message_type == MessageType.hand_shake:\n self_id = QVariant()\n pac_id = QVariant()\n pac_name = QVariant()\n stream >> self_id >> pac_id >> pac_name\n #print self_id.toInt(), pac_id.toInt(), pac_name.toString()\n elif message_type == MessageType.start_sync:\n #print \"start_sync\"\n self.signalSyncStart.emit()\n elif message_type == MessageType.end_sync:\n pass\n #print \"end_sync\"\n elif message_type == MessageType.imitators_list:\n pass\n #print \"imitators_list\"\n elif message_type == MessageType.tasks_list:\n pass\n #print \"tasks_list\"\n elif message_type == MessageType.order:\n #print \"order\"\n order_source = QVariant()\n order_caption = QVariant()\n 
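# --- editor's note (annotation, not part of the original record): the long
# if/elif chain on message_type in __b_array_recieved can be expressed as a
# dispatch table; this standalone sketch uses illustrative handlers, not the
# actual PACClient methods. The constants mirror the MessageType values
# defined at the end of this module. ---
HAND_SHAKE, ORDER, REPORT = 1, 6, 8

def on_hand_shake(payload):
    return ('hand_shake', payload)

def on_order(payload):
    return ('order', payload)

HANDLERS = {HAND_SHAKE: on_hand_shake, ORDER: on_order}

def dispatch(message_type, payload):
    # Unknown message types fall through to None instead of raising
    handler = HANDLERS.get(message_type)
    return handler(payload) if handler is not None else None

assert dispatch(ORDER, 'payload') == ('order', 'payload')
# --- end editor's note ---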
order_params = QVariant()\n order_net = QVariant()\n stream >> order_source >> order_caption >> order_params >> order_net\n #print \"cap\", order_caption.toString(), \"order_params\", order_params.toStringList()[0], \"order_net\", order_net.toString()\n data = list()\n data.append(order_source.toString())\n data.append(order_caption.toString())\n data.append(order_params.toStringList())\n data.append(order_net.toString())\n self.signal_order_set.emit(data)\n\n self.__send_order_confirm(order_caption, order_params)\n self.__send_report(order_caption, order_params)\n\n elif message_type == MessageType.confirm_order:\n pass\n #print \"confirm_order\"\n elif message_type == MessageType.report:\n pass\n #print \"report\"\n elif message_type == MessageType.tdn:\n pass\n #print \"tdn\"\n elif message_type == MessageType.text:\n pass\n #print \"text\"\n text = QVariant()\n stream >> text\n #print len(text.toString())\n elif message_type == MessageType.audio:\n pass\n #print \"audio\"\n elif message_type == MessageType.video:\n pass\n #print \"video\"\n\n def __send_order_confirm(self, cap, params):\n block = QByteArray()\n out = QDataStream(block, QIODevice.WriteOnly)\n out.setVersion(QDataStream.Qt_4_0)\n out << QVariant(MessageType.confirm_order) << QVariant(cap) << QVariant(params)\n self.write_to_socket(block)\n\n def __send_report(self, cap, params):\n block = QByteArray()\n out = QDataStream(block, QIODevice.WriteOnly)\n out.setVersion(QDataStream.Qt_4_0)\n out << QVariant(MessageType.report) << QVariant(cap) << QVariant(params)\n self.write_to_socket(block)\n\n @staticmethod\n def __bytes_to_int(a):\n return (ord(a.at(3)) << 24) | (ord(a.at(2)) << 16) | (ord(a.at(1)) << 8) | ord(a.at(0))\n\n @staticmethod\n def __int_to_bytes(int_val):\n a = (int_val >> 24) & 0xFF\n b = (int_val >> 16) & 0xFF\n c = (int_val >> 8) & 0xFF\n d = int_val & 0xFF\n byte_arr = QByteArray()\n byte_arr.append(chr(d))\n byte_arr.append(chr(c))\n byte_arr.append(chr(b))\n byte_arr.append(chr(a))\n return byte_arr\n\n def read_tcp_data(self):\n _size = self.tcpSocket.bytesAvailable()\n if _size > 0:\n _data = self.tcpSocket.read(_size)\n iterat = 0\n self._total += _size\n chunk_size = _size + self._tailSize\n data = QByteArray()\n data.resize(chunk_size)\n data.replace(self._tailSize, _size, _data)\n\n if self._tailSize > 0:\n data.replace(0, self._tailSize, self._tail)\n self._tail = QByteArray()\n self._tailSize = 0\n\n current_read_pos = 0\n while chunk_size > current_read_pos:\n iterat += 1\n if self._readBlockSize == 0:\n if chunk_size - current_read_pos < 4:\n self._tailSize = chunk_size - current_read_pos\n self._tail = QByteArray()\n self._tail.resize(self._tailSize)\n self._tail.replace(0, data.mid(current_read_pos, self._tailSize))\n data = QByteArray()\n return\n\n self._readBlockSize = self.__bytes_to_int(data.mid(current_read_pos, 4))\n #print \"self._readBlockSize\", self._readBlockSize\n self.block_size = self._readBlockSize - 4\n current_read_pos += 4\n\n if self.block_size < 1:\n pass\n #print \"Ошибочный размер сетевого блока!\", iterat\n\n if (chunk_size - current_read_pos) < self.block_size:\n self._tailSize = chunk_size - current_read_pos\n self._tail = QByteArray()\n self._tail.resize(self._tailSize)\n self._tail.replace(0, self._tailSize, data.mid(current_read_pos, self._tailSize))\n return\n\n temp = QByteArray()\n temp.resize(self.block_size)\n temp.replace(0, self.block_size, data.mid(current_read_pos, self.block_size))\n current_read_pos += self.block_size\n self._readBlockSize = 0\n 
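# --- editor's note (annotation, not part of the original record): read_tcp_data
# below reassembles 4-byte little-endian length-prefixed frames; this standalone
# sketch shows the same framing with struct instead of the hand-rolled
# __int_to_bytes/__bytes_to_int, where the size prefix counts the header itself. ---
import struct

def frame(payload: bytes) -> bytes:
    # total size = 4-byte header + payload, matching block.prepend(four_bytes)
    return struct.pack('<I', len(payload) + 4) + payload

def unframe(buf: bytes) -> bytes:
    size = struct.unpack_from('<I', buf)[0]  # includes the 4 header bytes
    return buf[4:size]

assert unframe(frame(b'hello')) == b'hello'
# --- end editor's note ---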
self.signalBArrayRecieved.emit(temp)\n\npac_client = PACClient()\n\n\nclass MessageType():\n hand_shake = 1 # рукопожатие - представляемся ПАКу при подключении\n start_sync = 2 # начало синхронизации\n end_sync = 3 # конец синхронизации\n\n imitators_list = 4 # передача списка имитаторов\n tasks_list = 5 # передача списка учебных задач\n\n order = 6 # приказ\n confirm_order = 7 # подтверждение приказа\n report = 8 # донесение\n tdn = 9 # технологическое донесение\n\n text = 10 # передача текста\n audio = 11 # передача аудио сигнала\n video = 12 # передача видео сигнала\n\n t_command = 13 # технологическая команда\n","sub_path":"common_blocks/PRZ/network/pac_client.py","file_name":"pac_client.py","file_ext":"py","file_size_in_byte":11081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"435409485","text":"from django.shortcuts import render\nfrom .models import Lesson\nfrom article.models import Course\n\nclass WLesson():\n nu = None\n le = None\n acti = False\n def __init__(self, n, l, a):\n self.nu = n\n self.le = l\n self.acti = a\n\n def __str__(self):\n return '{n} {l} {a}'.format(n = self.nu, l = self.le, a = self.acti)\n \n\ndef getLearningPage(request):\n lessons = []\n for le in Lesson.objects.all():\n if (le.lesson_number == int(request.GET['l']) and le.c_id == int(request.GET['id'])):\n lessons.append(WLesson(le.lesson_number, le.lesson_name, True))\n elif le.c_id == int(request.GET['id']):\n lessons.append(WLesson(le.lesson_number, le.lesson_name, False))\n #print(list(map(str, lessons)))\n try:\n lesson = Lesson.objects.get(c_id=int(request.GET['id']), lesson_number=int(request.GET['l']))\n except:\n lesson = Lesson()\n lesson.lesson_name = \"This course doesn't have any lessons\"\n course = Course.objects.get(id=int(request.GET['id']))\n return render(request, 'Engine/LearningPage.html', {'lessons':lessons, 'course':course, 'lesson':lesson})","sub_path":"lesson/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"156948230","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\n# This file is only used if you use `make publish` or\n# explicitly specify it as your config file.\n\nimport os\nimport sys\nsys.path.append(os.curdir)\nfrom pelicanconf import *\n\n\nSITEURL = 'http://eg0.cc/blog'\nRELATIVE_URLS = False\n\n\n\nFEED_DOMAIN = SITEURL\nFEED_RSS = 'feeds/rss.xml'\n\nFEED_ALL_RSS = 'feeds/all.rss.xml'\nCATEGORY_FEED_RSS = 'feeds/category/%s.rss.xml'\nTAG_FEED_RSS = 'feeds/tag/%s.rss.xml'\n\n\n\nDELETE_OUTPUT_DIRECTORY = True\n\n# Following items are often useful when publishing\n\nDISQUS_SITENAME = \"eg0cc\"\n\n\nDISQUS_ID_PREFIX_SLUG = True ### Testing\n\n### Set DISQUS_ID_PREFIX_SLUG to True if you have configured your article URLs such that the slug alone will likely not be unique.\n\n\nGOOGLE_ANALYTICS = \"UA-35840647-3\"\n","sub_path":"publishconf.py","file_name":"publishconf.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"223944954","text":"\ndef count_bits(num):\n count = 0\n while num:\n count = count + 1\n num = num & (num-1)\n return count\n\nvar = int(input(\"Give a number\"))\nset_bits = count_bits(var)\n\nprint(\"The number of set bits is 
%d\"%set_bits)\n","sub_path":"python-programming-workshop/algorithms/count_set_bits_three.py","file_name":"count_set_bits_three.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"70124298","text":"from django.conf.urls import include, url\nfrom . import views\n\nurlpatterns = [\n url(r'^past$',views.past,name=\"past\"),\n url(r'^live$',views.live,name=\"live\"),\n url(r'^future$',views.future,name=\"future\"),\n\n\n url(r'^past/(?P[0-9]+)/$',views.past_contest,name='past_contest'),\n # url(r'^past/(?P[0-9]+)/leaderboard/$',views.past_leader,name='past_leader'),\n url(r'^(?P[0-9]+)/$',views.contest,name='contest'),\n url(r'^(?P[0-9]+)/submit$',views.submit,name='submit'),\n url(r'^(?P[0-9]+)/live/$',views.live_contest,name='live_contest'),\n url(r'^(?P[0-9]+)/register/$',views.register,name='register'),\n url(r'^(?P[0-9]+)/registeration/$',views.registeration,name='registeration'),\n url(r'^(?P[0-9]+)/live/leaderboard/$',views.live_leaderboard,name='live_leaderboard'),\n url(r'^(?P[0-9]+)/live/get_leaderboard$',views.get_live_leaderboard,name='live_leaderboard'),\n url(r'^past/(?P[0-9]+)/leaderboard/$',views.leaderboard,name='leaderboard'),\n\n url(r'^create_contest$',views.create_contest,name=\"create_contest\"),\n url(r'^edit_contest/(?P[0-9]+)$',views.edit_contest, name='edit_contest'),\n\n url(r'^(?P[0-9]+)/ajax$',views.ajax_q,name=\"ajax_q\"),\n]\n","sub_path":"quiz/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"617123462","text":"import os\r\nimport sys\r\nREPO_DIR = os.getcwd()\r\nMTCNN_DIR = os.path.join(REPO_DIR, 'P03_FaceRecognition', 'FaceDetect_MTCNN', 'mtcnn')\r\nMTCNN_CORE_DIR = os.path.join(MTCNN_DIR, 'core')\r\nMTCNN_TRAIN_NET_DIR = os.path.join(MTCNN_DIR, 'train_net')\r\nsys.path.append(MTCNN_DIR)\r\nsys.path.append(MTCNN_CORE_DIR)\r\nsys.path.append(MTCNN_TRAIN_NET_DIR)\r\n# mtcnn.core\r\nfrom imagedb import ImageDB\r\n# mtcnn.train_net\r\nimport train\r\n# mtcnn\r\nimport config\r\n\r\n\r\n\r\ndef train_net(annotation_file, model_store_path,\r\n end_epoch=16, frequent=200, lr=0.01, batch_size=128, use_cuda=True):\r\n\r\n imagedb = ImageDB(annotation_file)\r\n gt_imdb = imagedb.load_imdb()\r\n gt_imdb = imagedb.append_flipped_images(gt_imdb)\r\n\r\n train.train_onet(model_store_path=model_store_path, end_epoch=end_epoch, imdb=gt_imdb, batch_size=batch_size, frequent=frequent, base_lr=lr, use_cuda=use_cuda)\r\n\r\nif __name__ == '__main__':\r\n\r\n print('train ONet argument:')\r\n\r\n ROOT_DIR = os.path.dirname(MTCNN_DIR)\r\n ANNO_DIR = os.path.join(ROOT_DIR, 'anno_store')\r\n annotation_file = os.path.join(ANNO_DIR, \"imglist_anno_48.txt\")\r\n model_store_path = os.path.join(ROOT_DIR, \"model_store\")\r\n end_epoch = 30\r\n lr = 0.001\r\n batch_size = 64\r\n\r\n use_cuda = True\r\n frequent = 50\r\n\r\n\r\n train_net(annotation_file, model_store_path,\r\n end_epoch, frequent, lr, batch_size, use_cuda)\r\n","sub_path":"P03_FaceRecognition/FaceDetect_MTCNN/mtcnn/train_net/train_o_net.py","file_name":"train_o_net.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"597747511","text":"\nclass Employee(object):\n def __init__(self,name,age,salary,employment_year):\n self.name=name\n self.age=age\n self.salary=salary\n self.employment_year=employment_year\n 
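# --- editor's note (annotation, not part of the original record): both
# get_working_years implementations in this record hard-code the year 2020;
# a minimal sketch of a time-safe variant using only the standard library. ---
from datetime import date

def working_years(employment_year: int) -> int:
    # Derive the current year at call time instead of freezing it in the source
    return date.today().year - employment_year

assert working_years(date.today().year) == 0
# --- end editor's note ---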
emp1=[]\n def get_working_years(self):\n return 2020-self.employment_year\n\n\nclass Manager(Employee):\n\n def __init__(self,name,age,salary,employment_year,bonus_percentage):\n super().__init__(name,age,salary,employment_year)\n self.bonus_percentage=bonus_percentage\n def get_working_years(self):\n return 2020-self.employment_year\n\n def get_bonus(self):\n\n return self.bonus_percentage * self.salary\n\n\n\ndef main():\n emp=[]\n man=[]\n options=[\"Show Employees\",\"Show Managers\",\"Add An Employee\",\"Add A Manager\",\"Exit\"]\n print(\"Welcome to HR Pro 2019\")\n print(\"Options: \")\n for index,variable in enumerate(options,1):\n print(f\"{index}. {variable}\")\n while True:\n choice=int(input(\"What would you like to do? \"))\n if(choice==1):\n if not emp:\n print(\"Employees\")\n print()\n else:\n for num in emp:\n print(f\"Name: {num.name} Age: {num.age} Salary: {num.salary} Working Years: {num.get_working_years()}\")\n elif(choice==2):\n if not man:\n print(\"Managers\")\n print()\n else:\n for num in man:\n print(f\"Name: {num.name} Age: {num.age} Salary: {num.salary} Working Years: {num.get_working_years()} Bonus: {num.get_bonus()}\")\n elif(choice==3):\n name=input(\"Name: \")\n age=int(input(\"Age: \"))\n salary=int(input(\"Salary: \"))\n emp_year=int(input(\"Employment Year: \"))\n emp1=Employee(name,age,salary,emp_year)\n emp.append(emp1)\n\n print(\"Employee added successfully\")\n elif(choice==4):\n name=input(\"Name: \")\n age=int(input(\"Age: \"))\n salary=int(input(\"Salary: \"))\n emp_year=int(input(\"Employment Year: \"))\n bonus_percentage=float(input(\"Bonus Percentage: \"))\n man1=Manager(name,age,salary,emp_year,bonus_percentage)\n man1.get_bonus()\n man.append(man1)\n\n print(\"Manager added successfully\")\n elif(choice==5):\n break\n else:\n print(\"you have entered a wrong number\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"hr_pro.py","file_name":"hr_pro.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"12787527","text":"\"\"\"\nGiven a list of numbers A and B\nthe assignment is to find a threshold t\nsuch that A lies on the one side of t\nand B lies on the other side, and t\nminimizes the number of mis-classifications.\n\"\"\"\n\nfrom colorama import Fore\n\ntrue, false, null = True, False, None\n\n\ndef main():\n # A = [1.3, -.5, 2.3, 5.1, 8.2, -.4, 10.5, 2.5, -4.2, -3.1, 1.7, 2.8, 4.6, 3.0, 10.1, 2.5, .8, 8.1, 2.7, -6.5, ]\n # B = [-2.7, -3.2, -5.4, -10.8, -1.1, -3.4, .7, -5.0, -7.1, -10.0]\n A = [1.3, -.5, 2.3, 5.1, 8.2, -.4, 10.5, 2.5, -4.2, -3.1, 1.7, 2.8, 4.6, 3.0, 10.1, 2.5, .8, 8.1, 2.7, -6.5, ]\n B = [-2.7, -3.2, -5.4, -10.8, -1.1, -3.4, .7, -5.0, -7.1, -10.0, .9, 1.2, -4.9, -6.2, 1.3, -1.9, -8.7, -7.4, .9,\n -10.3, ]\n # X is the set of probable dividers\n # This can be any value from A or B\n X = set(A + B)\n\n min_divider, max_divider = null, null\n min_mis_calc = A + B\n max_mis_calc = []\n\n A_with_class = [(val, 'A') for val in A]\n B_with_class = [(val, 'B') for val in B]\n Data = A_with_class + B_with_class\n for thresh in X:\n mis_calculate = []\n for val, cls in Data:\n # We assume, A is in lower side of thresh\n # and B is in the upper side\n # If any value doesn't follow this,\n # count it as mis-classification\n if (val > thresh and cls == 'A') or (val <= thresh and cls == 'B'):\n mis_calculate.append((val, cls))\n\n if len(mis_calculate) < len(min_mis_calc):\n min_mis_calc = mis_calculate\n min_divider = thresh\n if 
len(mis_calculate) > len(max_mis_calc):\n max_mis_calc = mis_calculate\n max_divider = thresh\n\n # Check if we can achieve better accuracy by\n # Putting A on the upper side and B on the lower\n upside_down = false\n if (len(Data) - len(max_mis_calc)) < len(min_mis_calc):\n upside_down = true\n min_divider = max_divider\n min_mis_calc = [x for x in Data if x not in max_mis_calc]\n\n # Show Accuracy\n accuracy = round(100 - (len(min_mis_calc) / len(Data) * 100), 2)\n summary = \"{} can classify with {}% accuracy.\".format(min_divider, accuracy)\n print(Fore.BLUE + summary + Fore.RESET)\n # Show decision regions\n divisions = [\">\", min_divider, \">=\"] if upside_down else [\"<\", min_divider, \"<=\"]\n print(Fore.GREEN + \"A {} {} {} B\".format(*divisions) + Fore.RESET)\n # Show mis-classifications\n print(Fore.RED + \"Misclassified: \" + Fore.RESET, end='')\n for i, x in enumerate(min_mis_calc):\n print(\"({}: {})\".format(*x), end='')\n if len(min_mis_calc) > 2 and i < len(min_mis_calc) - 2:\n print(', ', end='')\n elif i is len(min_mis_calc) - 2:\n print(' and ', end='')\n print('')\n\n\nmain()\n","sub_path":"assignments/adhoc/binary-classification.py","file_name":"binary-classification.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"524174422","text":"#!/bin/python\n# Filename: ftest.py\n#\n\nimport sys, httplib, json\n\n\nclass Connect:\n\tdef __init__(self, ipaddress, port = 80):\n\t\tself.net_ip = ipaddress\n\t\tself.net_port = port\n\t\tself._connect()\n\t\t\n\tdef __del__(self):\n\t\tself.h_conn.close()\n\t\t\n\tdef _connect(self):\n\t\ttry:\n\t\t\tself.h_conn = httplib.HTTPConnection(self.net_ip, self.net_port, timeout = 10)\n\t\texcept NotConnected as msg:\n\t\t\tprint(\"ERROR: can not connect to the server {0}\".format(msg))\n\t\texcept:\n\t\t\tprint(\"Some shit happens:\" + sys.exc_info()[0])\n\t\t\n\tdef _getuservalue(self, *args):\n\t\tfor param in args:\n\t\t\tparam[\"value\"] = str(raw_input(param[\"prompt\"]))\n\t\t\t\n\tdef _handleresponse(self, meth, url, req):\n\t\ttry:\n\t\t\tself.h_conn.request(meth, url, req)\n\t\t\tresponse = self.h_conn.getresponse()\n\t\t\tprint(\"\\n>>>\\t\\tResponse status: {0} Reason: {1}\\n>>>\".format(response.status, response.reason))\n\t\t\trdata = json.loads(response.read())\n\t\t\tif(rdata[\"err_code\"] == 0):\n\t\t\t\treturn rdata[\"answer\"]\n\t\t\telse:\n\t\t\t\tprint(\">>> ERROR: {0}[{1}]\".format(rdata[\"err_info\"], rdata[\"err_code\"]))\n\t\texcept KeyError as wrongkey:\n\t\t\tprint(\"ERROR: Server\\'s answer has no key {0}\".format(str(wrongkey)))\n\t\texcept httplib.HTTPException as httpexc:\n\t\t\tprint(\"ERROR: catch HTTPException: {0}\\nTrying to reconnect\".format(httpexc))\n\t\t\tself.h_conn.close()\n\t\t\tself._connect()\n\t\texcept:\n\t\t\tprint(\"Some error occurred: {0}\\nTry again\\n\".format(sys.exc_info()[0]))\n\t\treturn None\n\t\n\t\t\n\t\t\nclass Account(Connect):\n\t__inval = { \"path\": \"/account/\",\n\t\t\t \"id\": { \"value\": \"\", \"prompt\": \"Enter ID: \" }, \n\t\t\t\t\"name\": { \"value\": \"\", \"prompt\": \"Enter name: \" }, \n\t\t\t \"dreid\": { \"value\": \"\", \"prompt\": \"Enter DREid: \" } }\n\t\t\n\tdef new(self):\n\t\tself._getuservalue(self.__inval[\"name\"], self.__inval[\"dreid\"])\n\t\tself.__getanswer(\"POST\", self.__inval[\"path\"], json.dumps({ \"name\": self.__inval[\"name\"][\"value\"], \"dreid\": self.__inval[\"dreid\"][\"value\"] }))\n\t\t\n\tdef 
update(self):\n\t\tself._getuservalue(self.__inval[\"id\"], self.__inval[\"name\"], self.__inval[\"dreid\"])\n\t\tself.__getanswer(\"POST\", \"{0}{1}\".format(self.__inval[\"path\"], self.__inval[\"id\"][\"value\"]), json.dumps({ \"name\": self.__inval[\"name\"][\"value\"], \"dreid\": self.__inval[\"dreid\"][\"value\"] }))\n\t\t\n\tdef get(self):\n\t\tself._getuservalue(self.__inval[\"id\"])\n\t\tself.__getanswer(\"GET\", \"{0}{1}\".format(self.__inval[\"path\"], self.__inval[\"id\"][\"value\"]))\n\t\t\n\tdef __getanswer(self, meth, url, req = None):\n\t\tresval = self._handleresponse(meth, url, req)\n\t\tif(resval != None):\n\t\t\tprint(\">>> Status: OK! ID: [{0}] Name: [{1}] DREid: [{2}]\\n\".format(resval[\"id\"], resval[\"name\"], resval[\"dreid\"]))\n\t\t\n\n\nclass Activation(Connect):\n\t__inval = { \"path\": \"/activation/\",\n\t \"account\": { \"value\": \"\", \"prompt\": \"Enter account ID: \"}, \n\t\t\t\"os_build\": { \"value\": \"\", \"prompt\": \"Enter OS build: \" }, \n\t\t\"device_class\": { \"value\": \"\", \"prompt\": \"Enter device class: \" },\n\t\t \"is_active\": { \"value\": \"\", \"prompt\": \"Enter device status: \"},\n \"source_data\": { \"key1\" : 50, \"key2\" : 50 }}\n\t\n\tdef activate(self):\n\t\tself._getuservalue(self.__inval[\"account\"], self.__inval[\"os_build\"], self.__inval[\"device_class\"], self.__inval[\"is_active\"])\n\t\tself.__getanswer(\"POST\", self.__inval[\"path\"], json.dumps({\"account\": int(self.__inval[\"account\"][\"value\"]), \"os_build\": int(self.__inval[\"os_build\"][\"value\"]), \"device_class\": int(self.__inval[\"device_class\"][\"value\"]),\"is_active\": int(self.__inval[\"is_active\"][\"value\"]), \"source_data\": self.__inval[\"source_data\"]}))\n\t\t\n\tdef update(self):\n\t\tself._getuservalue(self.__inval[\"account\"], self.__inval[\"os_build\"], self.__inval[\"device_class\"], self.__inval[\"is_active\"])\n\t\tself.__getanswer(\"POST\", \"{0}{1}\".format(self.__inval[\"path\"], self.__inval[\"account\"][\"value\"] ), json.dumps({\"account\": int(self.__inval[\"account\"][\"value\"]), \"os_build\": int(self.__inval[\"os_build\"][\"value\"]), \"device_class\": int(self.__inval[\"device_class\"][\"value\"]), \"is_active\": int(self.__inval[\"is_active\"][\"value\"]), \"source_data\": self.__inval[\"source_data\"]}))\n\t\t\n\tdef get(self):\n\t\tself._getuservalue(self.__inval[\"account\"])\n\t\tself.__getanswer(\"GET\", \"{0}{1}\".format(self.__inval[\"path\"], self.__inval[\"account\"][\"value\"]))\n\t\t\n\tdef deactivate(self):\n\t\tself._getuservalue(self.__inval[\"account\"])\n\t\tself.__getanswer(\"DELETE\", \"{0}{1}\".format(self.__inval[\"path\"], self.__inval[\"account\"][\"value\"]))\n\t\t\n# Private methods:\n\tdef __getanswer(self, meth, url, req = None):\n\t\tresval = self._handleresponse(meth, url, req)\n\t\tif(resval != None):\n\t\t\tprint(\">>> Status: OK! 
ID: [{0}] Account: [{1}] Device class: [{2}] Still active: [{3}]\\n\".format(resval[\"id\"], resval[\"account\"], resval[\"device_class\"], resval[\"is_active\"]))\n\t\t\n\t\t\nclass DomainKey(Connect):\n\t__inval = { \"path\": \"/domainkey/\",\n\t\t\t\t \"id\": { \"value\": \"\", \"prompt\": \"Enter ID: \"},\n\t\t\t \"account\": { \"value\": \"\", \"prompt\": \"Enter account ID: \"},\n\t\t \"expire_date\": { \"value\": \"\", \"prompt\": \"Enter expire date (timestamp): \"}}\n\t\n\tdef newkey(self):\n\t\tself._getuservalue(self.__inval[\"account\"], self.__inval[\"expire_date\"])\n\t\tself.__getanswer(\"POST\", self.__inval[\"path\"], json.dumps({\"account\": int(self.__inval[\"account\"][\"value\"]), \"expire_date\": int(self.__inval[\"expire_date\"][\"value\"])}))\n\t\t\n\tdef updatekey(self): \n\t\tself._getuservalue(self.__inval[\"id\"], self.__inval[\"account\"], self._getuservalue(self.__inval[\"expire_date\"]))\n\t\tself.__getanswer(\"POST\", \"{0}{1}\".format(self.__inval[\"path\"], self.__inval[\"id\"][\"value\"]) , json.dumps({\"expire_date\": int(self.__inval[\"expire_date\"][\"value\"])}))\n\t\t\n\tdef getkey(self):\n\t\tself._getuservalue(self.__inval[\"id\"])\n\t\tself.__getanswer(\"GET\", \"{0}{1}\".format(self.__inval[\"path\"], self.__inval[\"id\"][\"value\"]))\n\t\n\tdef removekey(self):\n\t\tself._getuservalue(self.__inval[\"id\"])\n\t\tself.__getanswer(\"DELETE\", \"{0}{1}\".format(self.__inval[\"path\"], self.__inval[\"id\"][\"value\"]))\n\t\t\n# Private methods:\n\tdef __getanswer(self, meth, url, req = None):\n\t\tresval = self._handleresponse(meth, url, req)\n\t\tif(resval != None):\n\t\t\tprint(\">>> Status: OK! ID: [{0}] Account: [{1}] Expire date: [{2}] \\n\".format(resval[\"id\"], resval[\"account\"], resval[\"expire_date\"]))\n\t\t\n\t\t\nif __name__ == \"__main__\":\n\tif(len(sys.argv) < 2):\n\t\traise SystemExit(\"ERROR! 
Too few arguments.\\nUsage: \\n\\n\")\n\tacc = Account(sys.argv[1])\n\tdev = Activation(sys.argv[1])\n\tdomk = DomainKey(sys.argv[1])\n\tprint(\"\\n\\tFAS Service request\")\n\twhile True:\n\t\tuch = int(raw_input(\"\\n\\n1) Get account\\n2) New account\\n3) Update account\\n4) Get device\\n5) New device\\n6) Update device info\\n7) Remove device\\n8) New domain key\\n9) Update key\\n10) Get key\\n11) Remove key\\n\\n0) Exit\\n\\n\\\\> \"))\n\t\tif (uch == 1):\n\t\t\tacc.get()\n\t\telif(uch == 2):\n\t\t\tacc.new()\n\t\telif(uch == 3):\n\t\t\tacc.update()\n\t\telif(uch == 4):\n\t\t\tdev.get()\n\t\telif(uch == 5):\n\t\t\tdev.activate()\n\t\telif(uch == 6):\n\t\t\tdev.update()\n\t\telif(uch == 7):\n\t\t\tdev.deactivate()\n\t\telif(uch == 8):\n\t\t\tdomk.newkey()\n\t\telif(uch == 9):\n\t\t\tdomk.updatekey()\n\t\telif(uch == 10):\n\t\t\tdomk.getkey()\n\t\telif(uch == 11):\n\t\t\tdomk.removekey()\n\t\telif(uch == 0):\n\t\t\tbreak\n\n\t\t\t","sub_path":"ftest.py","file_name":"ftest.py","file_ext":"py","file_size_in_byte":6986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"305775709","text":"from tkinter import *\nimport pyautogui\n\nclass Widget:\n def __init__(self, parent, spacing, logger):\n self.parent = parent\n\n self.labelOne = Label(parent, text='x:')\n self.labelOne.grid(row=0, column=spacing, sticky=E)\n\n self.x = Entry(parent, width=5)\n self.x.grid(row=0, column=spacing+1)\n \n self.labelTwo = Label(parent, text='y:')\n self.labelTwo.grid(row=1, column=spacing, sticky=E)\n\n self.y = Entry(parent, width=5)\n self.y.grid(row=1, column=spacing+1)\n\n def run(self):\n pyautogui.moveTo(int(self.x.get()), int(self.y.get()))\n\n def returnSettings(self):\n settings = {}\n settings['x'] = self.x.get()\n settings['y'] = self.y.get()\n return settings\n\n def addSettings(self, settings):\n self.x.delete(0,END)\n self.x.insert(0, settings['x'])\n self.y.delete(0,END)\n self.y.insert(0, settings['y'])\n","sub_path":"options/sequential/moveMouseTo.py","file_name":"moveMouseTo.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"419713763","text":"import env\n\nimport pymongo\nimport time\n\nfrom models import mppt60, mppt600, mate3\n\n\n'''\nSolar controller polling script\n'''\n \n\n\ndef connectDB() -> pymongo.MongoClient:\n '''\n Connects to the MongoB replica set and returns the client\n Could not get the automatic failover to work, so just try each node individually.\n '''\n # Try the first node\n try:\n print(\"[INFO] Attempting to connect to MongoDB node 01...\")\n client = pymongo.MongoClient(env.mongodb_uri_01, serverSelectionTimeoutMS=500)\n client.server_info()\n print(\"[INFO] Connected to MongoDB node 01!\")\n connected = True\n except pymongo.errors.ServerSelectionTimeoutError:\n print(\"[ERROR] Failed to connect to MongoDB node 01\")\n connected = False\n\n # Try the second node\n if not connected:\n try:\n print(\"[INFO] Attempting to connect to MongoDB node 02...\")\n client = pymongo.MongoClient(env.mongodb_uri_02, serverSelectionTimeoutMS=500)\n client.server_info()\n print(\"[INFO] Connected to MongoDB node 02!\")\n client.server_info()\n connected = True\n except pymongo.errors.ServerSelectionTimeoutError:\n print(\"[ERROR] Failed to connect to MongoDB node 02\")\n connected = False\n \n # Try the third node\n if not connected:\n try:\n print(\"[INFO] Attempting to connect to MongoDB node 03...\")\n client = 
pymongo.MongoClient(env.mongodb_uri_03, serverSelectionTimeoutMS=500)\n client.server_info()\n print(\"[INFO] Connected to MongoDB node 03!\")\n connected = True\n except pymongo.errors.ServerSelectionTimeoutError:\n print(\"[ERROR] Failed to connect to MongoDB node 03\")\n connected = False\n\n if connected:\n db = client[env.database]\n return(db)\n \n else:\n return(False)\n\n\ndef run() -> None:\n ''' \n Continually checks for new MPPT Controllers and polls the modbus coils\n '''\n\n while True:\n # Attempt MongoDB connection\n db = connectDB()\n\n # If connected\n if db:\n while True:\n try:\n # Get tristar documents\n controllers = db['devices_aggregated'].find(\n {\n 'device_type.poller' : env.poller\n }\n )\n\n # Iterate controllers\n for controller in controllers:\n if controller['device_type']['name'] == \"Tristar MPPT 60\":\n mppt60.parse(controller, db, env)\n elif controller['device_type']['name'] == \"Tristar MPPT 600V\":\n mppt600.parse(controller, db, env)\n elif controller['device_type']['name'] == \"Mate3\":\n mate3.parse(controller, db, env)\n\n print(f\"[INFO] Polled {controller['device']['name']}\")\n\n print(\"[SLEEP] Sleeping 5 sec...\")\n time.sleep(5)\n except pymongo.errors.WriteConcernError:\n print(\"[ERROR] - Mongo connection error.\")\n break\n except pymongo.errors.NotMasterError:\n print(\"[ERROR] - Mongo master error.\")\n break\n except pymongo.errors.ServerSelectionTimeoutError as e:\n print(f\"[ERROR] - {e}\")\n break\n\n print(\"[ERROR] Unable to connect, sleeping for 10s...\")\n time.sleep(10)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"backend/mm_solar_rt/src/solar.py","file_name":"solar.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"71637396","text":"# -*- coding: utf-8 -*-\n\nimport platform\nimport socket\nimport sys\nimport uuid\n\nimport pytest\n\nfrom getmac import get_mac_address, getmac\n\nPY2 = sys.version_info[0] == 2\nMAC_RE_COLON = r'([0-9a-fA-F]{2}(?::[0-9a-fA-F]{2}){5})'\nMAC_RE_DASH = r'([0-9a-fA-F]{2}(?:-[0-9a-fA-F]{2}){5})'\n\n\ndef test_get_mac_address_localhost():\n assert get_mac_address(hostname='localhost') == '00:00:00:00:00:00'\n assert get_mac_address(ip='127.0.0.1') == '00:00:00:00:00:00'\n result = get_mac_address(hostname='localhost', network_request=False)\n assert result == '00:00:00:00:00:00'\n\n\ndef test_search(get_sample):\n text = get_sample('ifconfig.out')\n regex = r'HWaddr ' + MAC_RE_COLON\n assert getmac._search(regex, text, 0) == '74:d4:35:e9:45:71'\n\n\ndef test_popen(mocker):\n mocker.patch('getmac.getmac.PATH', [])\n m = mocker.patch('getmac.getmac._call_proc', return_value='SUCCESS')\n assert getmac._popen('TESTCMD', 'ARGS') == 'SUCCESS'\n m.assert_called_once_with('TESTCMD', 'ARGS')\n\n\ndef test_call_proc(mocker):\n mocker.patch('getmac.getmac.DEVNULL', 'DEVNULL')\n mocker.patch('getmac.getmac.ENV', 'ENV')\n\n mocker.patch('getmac.getmac.WINDOWS', True)\n m = mocker.patch('getmac.getmac.check_output', return_value='WINSUCCESS')\n assert getmac._call_proc('CMD', 'arg') == 'WINSUCCESS'\n m.assert_called_once_with('CMD' + ' ' + 'arg', stderr='DEVNULL', env='ENV')\n\n mocker.patch('getmac.getmac.WINDOWS', False)\n m = mocker.patch('getmac.getmac.check_output', return_value='YAY')\n assert getmac._call_proc('CMD', 'arg1 arg2') == 'YAY'\n m.assert_called_once_with(['CMD', 'arg1', 'arg2'], stderr='DEVNULL', env='ENV')\n\n\n@pytest.mark.skipif(platform.system() != 'Linux',\n reason=\"Can't reliably 
mock fcntl on non-Linux platforms\")\ndef test_fcntl_iface(mocker):\n data = b'enp3s0\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x01\\x00t\\xd45\\xe9' \\\n b'Es\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n mocker.patch('fcntl.ioctl', return_value=data)\n m = mocker.patch('socket.socket')\n assert getmac._fcntl_iface('enp3s0') == '74:d4:35:e9:45:73'\n m.assert_called_once_with(socket.AF_INET, socket.SOCK_DGRAM)\n\n\n# Python 2.7.5 (CentOS 7) doesn't have this...\n# The commit adding it: https://bit.ly/2Hnd7bN (no idea what release it was in)\n@pytest.mark.skipif(not hasattr(uuid, '_arp_getnode'),\n reason=\"This version of Python doesn't have _arp_getnode\")\ndef test_uuid_ip(mocker):\n mocker.patch('uuid._arp_getnode', return_value=278094213753144)\n assert getmac._uuid_ip('10.0.0.1') == 'FC:EC:DA:D3:29:38'\n mocker.patch('uuid._arp_getnode', return_value=None)\n assert getmac._uuid_ip('10.0.0.1') is None\n assert getmac._uuid_ip('en0') is None\n\n\ndef test_uuid_lanscan_iface(mocker):\n mocker.patch('uuid._find_mac', return_value=2482700837424)\n assert getmac._uuid_lanscan_iface('en1') == '02:42:0C:80:62:30'\n mocker.patch('uuid._find_mac', return_value=None)\n assert getmac._uuid_lanscan_iface('10.0.0.1') is None\n assert getmac._uuid_lanscan_iface('en0') is None\n\n\ndef test_uuid_convert():\n assert getmac._uuid_convert(2482700837424) == '02:42:0C:80:62:30'\n assert getmac._uuid_convert(278094213753144) == 'FC:EC:DA:D3:29:38'\n\n\ndef test_read_sys_iface_file(mocker):\n mocker.patch('getmac.getmac._read_file', return_value='00:0c:29:b5:72:37\\n')\n assert getmac._read_sys_iface_file('ens33') == '00:0c:29:b5:72:37\\n'\n mocker.patch('getmac.getmac._read_file', return_value=None)\n assert getmac._read_sys_iface_file('ens33') is None\n\n\ndef test_read_arp_file(mocker, get_sample):\n data = get_sample('ubuntu_18.10/proc_net_arp.out')\n mocker.patch('getmac.getmac._read_file', return_value=data)\n assert getmac._read_arp_file('192.168.16.2') == '00:50:56:e1:a8:4a'\n assert getmac._read_arp_file('192.168.16.254') == '00:50:56:e8:32:3c'\n assert getmac._read_arp_file('192.168.95.1') == '00:50:56:c0:00:0a'\n assert getmac._read_arp_file('192.168.95.254') == '00:50:56:fa:b7:54'\n\n\ndef test_read_file_return(mocker, get_sample):\n data = get_sample('ifconfig.out')\n mock_open = mocker.mock_open(read_data=data)\n if PY2:\n mocker.patch('__builtin__.open', mock_open)\n else:\n mocker.patch('builtins.open', mock_open)\n assert getmac._read_file('ifconfig.out') == data\n mock_open.assert_called_once_with('ifconfig.out')\n\n\ndef test_read_file_not_exist():\n assert getmac._read_file('DOESNOTEXIST') is None\n\n\ndef test_fetch_ip_using_dns(mocker):\n m = mocker.patch('socket.socket')\n m.return_value.getsockname.return_value = ('1.2.3.4', 51327)\n assert getmac._fetch_ip_using_dns() == '1.2.3.4'\n","sub_path":"tests/test_getmac.py","file_name":"test_getmac.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"176186084","text":"import io\nimport pickle\nfrom collections import Counter\n\nfrom datasketch import MinHash\n\nfrom renappy import Renappy\n\n\nclass InMemoryCluster:\n def __init__(self, minhash_host='localhost:6379', secondary_index_host=None, minhash_db=1, secondary_index_db=2,\n num_perm=128, seed=1, load_data_per=10000):\n if not secondary_index_host:\n secondary_index_host = minhash_host\n\n # minhash redis host, minhash redis port\n mr_host, 
mr_port = minhash_host.split(\":\")\n\n # secondary index redis host, secondary index redis port\n sr_host, sr_port = secondary_index_host.split(\":\")\n\n # minhash redis\n self.m_r = Renappy(host=mr_host, port=mr_port, db=minhash_db)\n # secondary index redis\n self.s_r = Renappy(host=sr_host, port=sr_port, db=secondary_index_db)\n self.num_perm = num_perm\n self.load_data_per = load_data_per\n self.seed = seed\n\n def flush_all(self):\n self.m_r.flushdb()\n self.s_r.flushdb()\n\n def init_cluster(self, data):\n for key in data.keys():\n self._generate_minhash_and_save(key, data[key])\n keys = self.m_r.keys()\n self._generate_and_save_secondary_index(keys)\n\n def update_cluster(self, data):\n for key in data.keys():\n if self.m_r.exists(key):\n self._update_secondary_index(data, key)\n else:\n self._generate_minhash_and_save(key, data[key])\n self._generate_and_save_secondary_index([key])\n\n def load_minhash_objs(self, keys):\n byte_streams = [io.BytesIO(byte_obj) for byte_obj in self.m_r.mget_str(keys)]\n objs = []\n\n for byte_stream in byte_streams:\n unpickler = pickle.Unpickler(byte_stream)\n objs.append(unpickler.load())\n\n return objs\n\n def search_secondary_index(self, hash_obj):\n keys = []\n for i, hash_value in enumerate(hash_obj.digest()):\n secondary_key = '{}-{}'.format(i, hash_value)\n\n l = self.s_r.lrange(secondary_key, 0, -1)\n keys.extend(l)\n\n return keys\n\n def most_common(self, key, count=10):\n minhash_obj = self.load_minhash_objs([key])[0]\n ssi = self.search_secondary_index(minhash_obj)\n # Remove self element\n return Counter(ssi).most_common(count)[1:]\n\n def _update_secondary_index(self, data, key):\n byte_stream = io.BytesIO(self.m_r.get_str(key))\n unpickler = pickle.Unpickler(byte_stream)\n hash_func = unpickler.load()\n org_hash_values = hash_func.digest()\n for stream in data[key]:\n hash_func.update(stream.encode('utf-8'))\n update_hash_values = hash_func.digest()\n need_remove_secondary_index = []\n for i in range(len(org_hash_values)):\n if org_hash_values[i] != update_hash_values[i]:\n need_remove_secondary_index.append(i)\n pipe = self.s_r.pipeline()\n for i in need_remove_secondary_index:\n org_secondary_key = '{}-{}'.format(i, org_hash_values[i])\n pipe.lrem(org_secondary_key, 1, key)\n update_secondary_key = '{}-{}'.format(i, update_hash_values[i])\n pipe.lpush(update_secondary_key, key)\n self.m_r.set_str(key, self._obj_to_byte_array(hash_func))\n pipe.execute()\n\n def _generate_minhash_and_save(self, key, stream):\n hash_func = self._generate_minhash_func(key, stream)\n byte_array = self._obj_to_byte_array(hash_func)\n self.m_r.set_str(key, byte_array)\n\n def _load_minhash(self, data, per):\n data_len = len(data)\n num_iter = data_len // per\n for index in range(num_iter + 1):\n begin = index * per\n end = begin + per\n if end > data_len:\n batch_data = data[begin:]\n else:\n batch_data = data[begin:end]\n\n byte_objs = self.m_r.mget_str(batch_data)\n byte_streams = [io.BytesIO(byte_obj) for byte_obj in byte_objs]\n minhash_objs = []\n for key, byte_stream in zip(batch_data, byte_streams):\n unpickler = pickle.Unpickler(byte_stream)\n minhash_objs.append((key, unpickler.load()))\n yield minhash_objs\n yield None\n\n def _create_and_push_secondary_index(self, key, hash_obj):\n for i, hash_value in enumerate(hash_obj.digest()):\n secondary_key = '{}-{}'.format(i, hash_value)\n self.s_r.push_batch(secondary_key, key)\n\n def _generate_and_save_secondary_index(self, keys):\n batch_objs = self._load_minhash(keys, self.load_data_per)\n while 
True:\n batch = next(batch_objs)\n if not batch:\n break\n for key, hash_obj in batch:\n self._create_and_push_secondary_index(key, hash_obj)\n self.s_r.pipe_force_execute()\n\n def _generate_minhash_func(self, key, streams):\n exist_stream = None\n if self.m_r.exists(key):\n exist_stream = self.m_r.get_str(key)\n\n if exist_stream:\n hash_func = pickle.load(exist_stream)\n else:\n hash_func = MinHash(num_perm=self.num_perm, seed=self.seed)\n\n for stream in streams:\n hash_func.update(stream.encode('utf-8'))\n\n return hash_func\n\n @staticmethod\n def _obj_to_byte_array(obj):\n byte_stream = io.BytesIO()\n pickler = pickle.Pickler(byte_stream)\n\n pickler.dump(obj)\n byte_stream.seek(0)\n byte_array = byte_stream.read()\n return byte_array\n\n\n\n\n\n","sub_path":"in_memory_cluster.py","file_name":"in_memory_cluster.py","file_ext":"py","file_size_in_byte":5609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"627224977","text":"def TopoSort(graph):\n in_degrees = dict((u,0) for u in graph) #初始化所有顶点入度��0\n vertex_num = len(in_degrees)\t\n for u in graph:\n for v in graph[u]:\n in_degrees[v] += 1 #计算每个顶点的入度\n\t\t\t\n D = [u for u in in_degrees if in_degrees[u] == 0] # 筛选入度为0的顶点\n result = []\n while D:\n u = D.pop() #默认从最后一个删除\n result.append(u)\n for v in graph[u]:\n in_degrees[v] -= 1 #移除其所有指向\n if in_degrees[v] == 0:\n D.append(v) #再次筛选入度为0的顶点\n\t\t\t\t\n if len(result) == vertex_num: #如果循环结束后存在非0入度的顶点说明图中有环\n return result\n else:\n print(\"There are circuit.\")\n\t\t\nG = {\n 'a':'bce',\n 'b':'d',\n 'c':'d',\n 'd':'e',\n 'e':'cd'\n}\nprint(TopoSort(G))\n","sub_path":"168206139/04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"614908570","text":"from __future__ import print_function\nimport csv\nimport os\nimport time\nimport unittest\nfrom io import open\n\nimport bq_utils\nimport common\nimport app_identity\nimport gcs_utils\nfrom gcloud.gcs import StorageClient\nimport resources\nfrom tests import test_util\nfrom tests.test_util import (FAKE_HPO_ID, FIVE_PERSONS_PERSON_CSV,\n PITT_FIVE_PERSONS_OBSERVATION_CSV)\nfrom validation.achilles import ACHILLES_TABLES\n\n\nclass BqUtilsTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n print('**************************************************************')\n print(cls.__name__)\n print('**************************************************************')\n\n def setUp(self):\n self.hpo_bucket = gcs_utils.get_hpo_bucket(FAKE_HPO_ID)\n self.person_table_id = bq_utils.get_table_id(FAKE_HPO_ID, common.PERSON)\n self.dataset_id = bq_utils.get_dataset_id()\n test_util.delete_all_tables(self.dataset_id)\n self.project_id = app_identity.get_application_id()\n self.TEST_FIELDS = [\n {\n \"type\": \"integer\",\n \"name\": \"integer_field\",\n \"mode\": \"required\",\n \"description\": \"An integer field\"\n },\n # DC-586 Import RDR rules should support null fields\n {\n \"type\": \"integer\",\n \"name\": \"nullable_integer_field\",\n \"mode\": \"nullable\",\n \"description\": \"A nullable integer field\"\n },\n {\n \"type\": \"string\",\n \"name\": \"string_field\",\n \"mode\": \"required\",\n \"description\": \"A string field\"\n },\n {\n \"type\": \"date\",\n \"name\": \"date_field\",\n \"mode\": \"required\",\n \"description\": \"A date field\"\n },\n {\n \"type\": \"timestamp\",\n \"name\": \"timestamp_field\",\n \"mode\": \"required\",\n \"description\": \"A 
timestamp field\"\n },\n {\n \"type\": \"boolean\",\n \"name\": \"boolean_field\",\n \"mode\": \"required\",\n \"description\": \"A boolean field\"\n },\n {\n \"type\": \"float\",\n \"name\": \"float_field\",\n \"mode\": \"required\",\n \"description\": \"A float field\"\n }\n ]\n self.DT_FORMAT = '%Y-%m-%d %H:%M:%S'\n self._empty_bucket()\n\n def _empty_bucket(self):\n bucket_items = gcs_utils.list_bucket(self.hpo_bucket)\n for bucket_item in bucket_items:\n gcs_utils.delete_object(self.hpo_bucket, bucket_item['name'])\n\n def _drop_tables(self):\n tables = bq_utils.list_tables()\n for table in tables:\n table_id = table['tableReference']['tableId']\n if table_id not in common.VOCABULARY_TABLES:\n bq_utils.delete_table(table_id)\n\n def _table_has_clustering(self, table_info):\n clustering = table_info.get('clustering')\n self.assertIsNotNone(clustering)\n fields = clustering.get('fields')\n self.assertSetEqual(set(fields), {'person_id'})\n time_partitioning = table_info.get('timePartitioning')\n self.assertIsNotNone(time_partitioning)\n tpe = time_partitioning.get('type')\n self.assertEqual(tpe, 'DAY')\n\n def test_load_csv(self):\n app_id = app_identity.get_application_id()\n table_name = 'achilles_analysis'\n csv_file_name = table_name + '.csv'\n local_csv_path = os.path.join(test_util.TEST_DATA_EXPORT_PATH,\n csv_file_name)\n sc = StorageClient()\n sc_bucket = sc.get_bucket(self.hpo_bucket)\n bucket_blob = sc_bucket.blob(csv_file_name)\n with open(local_csv_path, 'rb') as fp:\n bucket_blob.upload_from_file(fp)\n hpo_bucket = self.hpo_bucket\n gcs_object_path = 'gs://%(hpo_bucket)s/%(csv_file_name)s' % locals()\n dataset_id = self.dataset_id\n load_results = bq_utils.load_csv(table_name, gcs_object_path, app_id,\n dataset_id, table_name)\n\n load_job_id = load_results['jobReference']['jobId']\n incomplete_jobs = bq_utils.wait_on_jobs([load_job_id])\n self.assertEqual(len(incomplete_jobs), 0,\n 'loading table {} timed out'.format(table_name))\n query_response = bq_utils.query('SELECT COUNT(1) FROM %(table_name)s' %\n locals())\n self.assertEqual(query_response['kind'], 'bigquery#queryResponse')\n\n def test_load_cdm_csv(self):\n sc = StorageClient()\n sc_bucket = sc.get_bucket(self.hpo_bucket)\n bucket_blob = sc_bucket.blob('person.csv')\n with open(FIVE_PERSONS_PERSON_CSV, 'rb') as fp:\n bucket_blob.upload_from_file(fp)\n result = bq_utils.load_cdm_csv(FAKE_HPO_ID, common.PERSON)\n self.assertEqual(result['status']['state'], 'RUNNING')\n load_job_id = result['jobReference']['jobId']\n table_id = result['configuration']['load']['destinationTable'][\n 'tableId']\n incomplete_jobs = bq_utils.wait_on_jobs([load_job_id])\n self.assertEqual(len(incomplete_jobs), 0,\n 'loading table {} timed out'.format(table_id))\n table_info = bq_utils.get_table_info(table_id)\n num_rows = table_info.get('numRows')\n self.assertEqual(num_rows, '5')\n\n def test_query_result(self):\n sc = StorageClient()\n sc_bucket = sc.get_bucket(self.hpo_bucket)\n bucket_blob = sc_bucket.blob('person.csv')\n with open(FIVE_PERSONS_PERSON_CSV, 'rb') as fp:\n bucket_blob.upload_from_file(fp)\n result = bq_utils.load_cdm_csv(FAKE_HPO_ID, common.PERSON)\n load_job_id = result['jobReference']['jobId']\n incomplete_jobs = bq_utils.wait_on_jobs([load_job_id])\n self.assertEqual(len(incomplete_jobs), 0,\n 'loading table {} timed out'.format(common.PERSON))\n table_id = bq_utils.get_table_id(FAKE_HPO_ID, common.PERSON)\n q = 'SELECT person_id FROM %s' % table_id\n result = bq_utils.query(q)\n self.assertEqual(5, 
int(result['totalRows']))\n\n def test_create_table(self):\n table_id = 'some_random_table_id'\n fields = [\n dict(name='person_id', type='integer', mode='required'),\n dict(name='name', type='string', mode='nullable')\n ]\n result = bq_utils.create_table(table_id, fields)\n self.assertTrue('kind' in result)\n self.assertEqual(result['kind'], 'bigquery#table')\n table_info = bq_utils.get_table_info(table_id)\n self._table_has_clustering(table_info)\n\n def test_create_existing_table_without_drop_raises_error(self):\n table_id = 'some_random_table_id'\n fields = [\n dict(name='id', type='integer', mode='required'),\n dict(name='name', type='string', mode='nullable')\n ]\n bq_utils.create_table(table_id, fields)\n with self.assertRaises(bq_utils.InvalidOperationError):\n bq_utils.create_table(table_id, fields, drop_existing=False)\n\n def test_create_table_drop_existing_success(self):\n table_id = 'some_random_table_id'\n fields = [\n dict(name='id', type='integer', mode='required'),\n dict(name='name', type='string', mode='nullable')\n ]\n result_1 = bq_utils.create_table(table_id, fields)\n # sanity check\n table_id = result_1['tableReference']['tableId']\n self.assertTrue(bq_utils.table_exists(table_id))\n result_2 = bq_utils.create_table(table_id, fields, drop_existing=True)\n # same id and second one created after first one\n self.assertEqual(result_1['id'], result_2['id'])\n self.assertTrue(result_2['creationTime'] > result_1['creationTime'])\n\n def test_create_standard_table(self):\n standard_tables = list(resources.CDM_TABLES) + ACHILLES_TABLES\n for standard_table in standard_tables:\n table_id = f'prefix_for_test_{standard_table}'\n result = bq_utils.create_standard_table(standard_table, table_id)\n self.assertTrue('kind' in result)\n self.assertEqual(result['kind'], 'bigquery#table')\n # sanity check\n self.assertTrue(bq_utils.table_exists(table_id))\n\n def test_load_ehr_observation(self):\n hpo_id = 'pitt'\n dataset_id = self.dataset_id\n table_id = bq_utils.get_table_id(hpo_id, table_name='observation')\n q = 'SELECT observation_id FROM {dataset_id}.{table_id} ORDER BY observation_id'.format(\n dataset_id=dataset_id, table_id=table_id)\n expected_observation_ids = [\n int(row['observation_id'])\n for row in resources.csv_to_list(PITT_FIVE_PERSONS_OBSERVATION_CSV)\n ]\n sc = StorageClient()\n sc_bucket = sc.get_bucket(gcs_utils.get_hpo_bucket(hpo_id))\n bucket_blob = sc_bucket.blob('observation.csv')\n with open(PITT_FIVE_PERSONS_OBSERVATION_CSV, 'rb') as fp:\n bucket_blob.upload_from_file(fp)\n result = bq_utils.load_cdm_csv(hpo_id, 'observation')\n job_id = result['jobReference']['jobId']\n incomplete_jobs = bq_utils.wait_on_jobs([job_id])\n self.assertEqual(len(incomplete_jobs), 0,\n 'pitt_observation load job did not complete')\n load_job_result = bq_utils.get_job_details(job_id)\n load_job_result_status = load_job_result['status']\n load_job_errors = load_job_result_status.get('errors')\n self.assertIsNone(load_job_errors,\n msg='pitt_observation load job failed: ' +\n str(load_job_errors))\n query_results_response = bq_utils.query(q)\n query_job_errors = query_results_response.get('errors')\n self.assertIsNone(query_job_errors)\n actual_result = [\n int(row['f'][0]['v']) for row in query_results_response['rows']\n ]\n self.assertCountEqual(actual_result, expected_observation_ids)\n\n def test_load_table_from_csv(self):\n table_id = 'test_csv_table'\n csv_file = 'load_csv_test_data.csv'\n csv_path = os.path.join(test_util.TEST_DATA_PATH, csv_file)\n with open(csv_path, 
'r') as f:\n expected = list(csv.DictReader(f))\n bq_utils.load_table_from_csv(self.project_id, self.dataset_id, table_id,\n csv_path, self.TEST_FIELDS)\n q = \"\"\" SELECT *\n FROM `{project_id}.{dataset_id}.{table_id}`\"\"\".format(\n project_id=self.project_id,\n dataset_id=self.dataset_id,\n table_id=table_id)\n r = bq_utils.query(q)\n actual = bq_utils.response2rows(r)\n\n # Convert the epoch times to datetime with time zone\n for row in actual:\n row['timestamp_field'] = time.strftime(\n self.DT_FORMAT + ' UTC', time.gmtime(row['timestamp_field']))\n expected.sort(key=lambda row: row['integer_field'])\n actual.sort(key=lambda row: row['integer_field'])\n for i, _ in enumerate(expected):\n self.assertCountEqual(expected[i], actual[i])\n\n def test_get_hpo_info(self):\n hpo_info = bq_utils.get_hpo_info()\n self.assertGreater(len(hpo_info), 0)\n\n def test_csv_line_to_sql_row_expr(self):\n fields = [{\n 'name': 'nullable_date_col',\n 'type': 'date',\n 'mode': 'nullable',\n 'description': ''\n }, {\n 'name': 'nullable_float_col',\n 'type': 'float',\n 'mode': 'nullable',\n 'description': ''\n }, {\n 'name': 'nullable_integer_col',\n 'type': 'integer',\n 'mode': 'nullable',\n 'description': ''\n }, {\n 'name': 'nullable_string_col',\n 'type': 'string',\n 'mode': 'nullable',\n 'description': ''\n }, {\n 'name': 'nullable_timestamp_col',\n 'type': 'timestamp',\n 'mode': 'nullable',\n 'description': ''\n }, {\n 'name': 'required_date_col',\n 'type': 'date',\n 'mode': 'required',\n 'description': ''\n }, {\n 'name': 'required_float_col',\n 'type': 'float',\n 'mode': 'required',\n 'description': ''\n }, {\n 'name': 'required_integer_col',\n 'type': 'integer',\n 'mode': 'required',\n 'description': ''\n }, {\n 'name': 'required_string_col',\n 'type': 'string',\n 'mode': 'required',\n 'description': ''\n }, {\n 'name': 'required_timestamp_col',\n 'type': 'timestamp',\n 'mode': 'required',\n 'description': ''\n }]\n\n # dummy values for each type\n flt_str = \"3.14\"\n int_str = \"1234\"\n str_str = \"abc\"\n dt_str = \"2019-01-01\"\n ts_str = \"2019-01-01 14:00:00.0\"\n row = {\n 'nullable_date_col': dt_str,\n 'nullable_float_col': flt_str,\n 'nullable_integer_col': int_str,\n 'nullable_string_col': str_str,\n 'nullable_timestamp_col': ts_str,\n 'required_date_col': dt_str,\n 'required_float_col': flt_str,\n 'required_integer_col': int_str,\n 'required_string_col': str_str,\n 'required_timestamp_col': ts_str\n }\n # all fields populated\n expected_expr = f\"('{dt_str}',{flt_str},{int_str},'{str_str}','{ts_str}','{dt_str}',{flt_str},{int_str},'{str_str}','{ts_str}')\"\n actual_expr = bq_utils.csv_line_to_sql_row_expr(row, fields)\n self.assertEqual(expected_expr, actual_expr)\n\n # nullable int zero is converted\n row['nullable_integer_col'] = '0'\n expected_expr = f\"('{dt_str}',{flt_str},0,'{str_str}','{ts_str}','{dt_str}',{flt_str},{int_str},'{str_str}','{ts_str}')\"\n actual_expr = bq_utils.csv_line_to_sql_row_expr(row, fields)\n self.assertEqual(expected_expr, actual_expr)\n\n # empty nullable is converted null\n row['nullable_date_col'] = ''\n row['nullable_float_col'] = ''\n row['nullable_integer_col'] = ''\n row['nullable_string_col'] = ''\n row['nullable_timestamp_col'] = ''\n expected_expr = f\"(NULL,NULL,NULL,NULL,NULL,'{dt_str}',{flt_str},{int_str},'{str_str}','{ts_str}')\"\n actual_expr = bq_utils.csv_line_to_sql_row_expr(row, fields)\n self.assertEqual(expected_expr, actual_expr)\n\n # empty required string converted to empty string value\n row['required_string_col'] = ''\n 
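# --- editor's note (annotation, not part of the original record): a minimal
# standalone sketch of the conversion rules this test exercises -- empty
# nullable fields become NULL, an empty required string stays '', and an empty
# required non-string field raises. This is an illustration only, not the real
# bq_utils.csv_line_to_sql_row_expr. ---
def to_sql_literal(value, field):
    if value == '':
        if field['mode'] == 'nullable':
            return 'NULL'
        if field['type'] == 'string':
            return "''"
        raise ValueError(
            'Value not provided for required field %s' % field['name'])
    if field['type'] in ('string', 'date', 'timestamp'):
        return "'%s'" % value
    return value  # integer/float values pass through unquoted

assert to_sql_literal('', {'mode': 'nullable', 'type': 'integer', 'name': 'x'}) == 'NULL'
assert to_sql_literal('', {'mode': 'required', 'type': 'string', 'name': 'y'}) == "''"
# --- end editor's note ---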
actual_expr = bq_utils.csv_line_to_sql_row_expr(row, fields)\n expected_expr = f\"(NULL,NULL,NULL,NULL,NULL,'{dt_str}',{flt_str},{int_str},'','{ts_str}')\"\n self.assertEqual(expected_expr, actual_expr)\n\n # empty required field raises error\n row['required_integer_col'] = ''\n with self.assertRaises(bq_utils.InvalidOperationError) as c:\n bq_utils.csv_line_to_sql_row_expr(row, fields)\n self.assertEqual(\n c.exception.msg,\n f'Value not provided for required field required_integer_col')\n\n def tearDown(self):\n test_util.delete_all_tables(self.dataset_id)\n self._empty_bucket()\n","sub_path":"tests/integration_tests/data_steward/bq_utils_test.py","file_name":"bq_utils_test.py","file_ext":"py","file_size_in_byte":15587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"178529554","text":"# Copyright (c) 2016 Red Hat, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport ddt\n\nimport mock\n\nfrom oslo_config import cfg\nfrom oslo_utils import timeutils\n\nfrom cinder.common import constants\nfrom cinder import exception\nfrom cinder import objects\nfrom cinder.objects import fields\nfrom cinder.tests.unit import fake_service\nfrom cinder.tests.unit import utils\nfrom cinder.tests.unit import volume as base\nimport cinder.volume\nfrom cinder.volume import manager\nfrom cinder.volume import rpcapi as volume_rpcapi\n\n\nCONF = cfg.CONF\n\n\n@ddt.ddt\nclass ReplicationTestCase(base.BaseVolumeTestCase):\n def setUp(self):\n super(ReplicationTestCase, self).setUp()\n self.host = 'host@backend#pool'\n self.manager = manager.VolumeManager(host=self.host)\n\n @mock.patch('cinder.objects.VolumeList.get_all')\n @mock.patch('cinder.volume.driver.BaseVD.failover_host',\n side_effect=exception.InvalidReplicationTarget(''))\n @ddt.data(('backend2', 'default', fields.ReplicationStatus.FAILED_OVER),\n ('backend2', 'backend3', fields.ReplicationStatus.FAILED_OVER),\n (None, 'backend2', fields.ReplicationStatus.ENABLED),\n ('', 'backend2', fields.ReplicationStatus.ENABLED))\n @ddt.unpack\n def test_failover_host_invalid_target(self, svc_backend, new_backend,\n expected, mock_failover,\n mock_getall):\n \"\"\"Test replication failover_host with invalid_target.\n\n When failingover fails due to an invalid target exception we return\n replication_status to its previous status, and we decide what that is\n depending on the currect active backend.\n \"\"\"\n svc = utils.create_service(\n self.context,\n {'host': self.host,\n 'binary': constants.VOLUME_BINARY,\n 'active_backend_id': svc_backend,\n 'replication_status': fields.ReplicationStatus.FAILING_OVER})\n\n self.manager.failover_host(self.context, new_backend)\n mock_getall.assert_called_once_with(self.context,\n filters={'host': self.host})\n mock_failover.assert_called_once_with(self.context,\n mock_getall.return_value,\n secondary_id=new_backend)\n\n db_svc = objects.Service.get_by_id(self.context, svc.id)\n self.assertEqual(expected, db_svc.replication_status)\n\n 
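# --- editor's note (annotation, not part of the original record): the test
# methods below rely on ddt's @data/@unpack decorators to expand one test body
# into several parameterized cases; a minimal standalone illustration of that
# pattern. ---
import unittest
import ddt

@ddt.ddt
class ExampleTest(unittest.TestCase):

    @ddt.data((1, 2, 3), (2, 3, 5))
    @ddt.unpack
    def test_add(self, a, b, expected):
        # ddt generates a separate named test method for each data tuple
        self.assertEqual(a + b, expected)
# --- end editor's note ---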
@mock.patch('cinder.volume.driver.BaseVD.failover_host',\n mock.Mock(side_effect=exception.VolumeDriverException('')))\n def test_failover_host_driver_exception(self):\n svc = utils.create_service(\n self.context,\n {'host': self.host,\n 'binary': constants.VOLUME_BINARY,\n 'active_backend_id': None,\n 'replication_status': fields.ReplicationStatus.FAILING_OVER})\n\n self.manager.failover_host(self.context, mock.sentinel.backend_id)\n\n db_svc = objects.Service.get_by_id(self.context, svc.id)\n self.assertEqual(fields.ReplicationStatus.FAILOVER_ERROR,\n db_svc.replication_status)\n\n @mock.patch('cinder.objects.Service.is_up', True)\n @mock.patch.object(volume_rpcapi.VolumeAPI, 'failover')\n @mock.patch.object(cinder.db, 'conditional_update')\n @mock.patch.object(objects.ServiceList, 'get_all')\n def test_failover(self, mock_get_all, mock_db_update, mock_failover):\n \"\"\"Test replication failover.\"\"\"\n\n service = fake_service.fake_service_obj(self.context,\n binary='cinder-volume')\n mock_get_all.return_value = [service]\n mock_db_update.return_value = {'replication_status': 'enabled'}\n volume_api = cinder.volume.api.API()\n volume_api.failover(self.context, host=CONF.host, cluster_name=None)\n mock_failover.assert_called_once_with(self.context, service, None)\n\n @mock.patch.object(volume_rpcapi.VolumeAPI, 'failover')\n @mock.patch.object(cinder.db, 'conditional_update')\n @mock.patch.object(cinder.db, 'service_get_all')\n def test_failover_unexpected_status(self, mock_db_get_all, mock_db_update,\n mock_failover):\n \"\"\"Test replication failover unxepected status.\"\"\"\n\n mock_db_get_all.return_value = [fake_service.fake_service_obj(\n self.context,\n binary='cinder-volume')]\n mock_db_update.return_value = None\n volume_api = cinder.volume.api.API()\n self.assertRaises(exception.InvalidInput,\n volume_api.failover,\n self.context,\n host=CONF.host,\n cluster_name=None)\n\n @mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host')\n @mock.patch.object(cinder.db, 'conditional_update', return_value=1)\n @mock.patch.object(cinder.objects.ServiceList, 'get_all')\n def test_freeze_host(self, mock_get_all, mock_db_update,\n mock_freeze):\n \"\"\"Test replication freeze_host.\"\"\"\n\n service = fake_service.fake_service_obj(self.context,\n binary='cinder-volume')\n mock_get_all.return_value = [service]\n mock_freeze.return_value = True\n volume_api = cinder.volume.api.API()\n volume_api.freeze_host(self.context, host=CONF.host, cluster_name=None)\n mock_freeze.assert_called_once_with(self.context, service)\n\n @mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host')\n @mock.patch.object(cinder.db, 'conditional_update')\n @mock.patch.object(cinder.db, 'service_get_all')\n def test_freeze_host_unexpected_status(self, mock_get_all,\n mock_db_update,\n mock_freeze):\n \"\"\"Test replication freeze_host unexpected status.\"\"\"\n\n mock_get_all.return_value = [fake_service.fake_service_obj(\n self.context,\n binary='cinder-volume')]\n mock_db_update.return_value = None\n volume_api = cinder.volume.api.API()\n self.assertRaises(exception.InvalidInput,\n volume_api.freeze_host,\n self.context,\n host=CONF.host,\n cluster_name=None)\n\n @mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host')\n @mock.patch.object(cinder.db, 'conditional_update', return_value=1)\n @mock.patch.object(cinder.objects.ServiceList, 'get_all')\n def test_thaw_host(self, mock_get_all, mock_db_update,\n mock_thaw):\n \"\"\"Test replication thaw_host.\"\"\"\n\n service = 
fake_service.fake_service_obj(self.context,\n binary='cinder-volume')\n mock_get_all.return_value = [service]\n mock_thaw.return_value = True\n volume_api = cinder.volume.api.API()\n volume_api.thaw_host(self.context, host=CONF.host, cluster_name=None)\n mock_thaw.assert_called_once_with(self.context, service)\n\n @mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host')\n @mock.patch.object(cinder.db, 'conditional_update')\n @mock.patch.object(cinder.db, 'service_get_all')\n def test_thaw_host_unexpected_status(self, mock_get_all,\n mock_db_update,\n mock_thaw):\n \"\"\"Test replication thaw_host unexpected status.\"\"\"\n\n mock_get_all.return_value = [fake_service.fake_service_obj(\n self.context,\n binary='cinder-volume')]\n mock_db_update.return_value = None\n volume_api = cinder.volume.api.API()\n self.assertRaises(exception.InvalidInput,\n volume_api.thaw_host,\n self.context,\n host=CONF.host, cluster_name=None)\n\n @mock.patch('cinder.volume.driver.BaseVD.failover_completed')\n def test_failover_completed(self, completed_mock):\n rep_field = fields.ReplicationStatus\n svc = objects.Service(self.context, host=self.volume.host,\n binary=constants.VOLUME_BINARY,\n replication_status=rep_field.ENABLED)\n svc.create()\n self.volume.failover_completed(\n self.context,\n {'active_backend_id': 'secondary',\n 'replication_status': rep_field.FAILED_OVER})\n service = objects.Service.get_by_id(self.context, svc.id)\n self.assertEqual('secondary', service.active_backend_id)\n self.assertEqual('failed-over', service.replication_status)\n completed_mock.assert_called_once_with(self.context, 'secondary')\n\n @mock.patch('cinder.volume.driver.BaseVD.failover_completed', wraps=True)\n def test_failover_completed_driver_failure(self, completed_mock):\n rep_field = fields.ReplicationStatus\n svc = objects.Service(self.context, host=self.volume.host,\n binary=constants.VOLUME_BINARY,\n replication_status=rep_field.ENABLED)\n svc.create()\n self.volume.failover_completed(\n self.context,\n {'active_backend_id': 'secondary',\n 'replication_status': rep_field.FAILED_OVER})\n service = objects.Service.get_by_id(self.context, svc.id)\n self.assertEqual('secondary', service.active_backend_id)\n self.assertEqual(rep_field.ERROR, service.replication_status)\n self.assertTrue(service.disabled)\n self.assertIsNotNone(service.disabled_reason)\n completed_mock.assert_called_once_with(self.context, 'secondary')\n\n @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover_completed')\n def test_finish_failover_non_clustered(self, completed_mock):\n svc = mock.Mock(is_clustered=None)\n self.volume.finish_failover(self.context, svc, mock.sentinel.updates)\n svc.update.assert_called_once_with(mock.sentinel.updates)\n svc.save.assert_called_once_with()\n completed_mock.assert_not_called()\n\n @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover_completed')\n def test_finish_failover_clustered(self, completed_mock):\n svc = mock.Mock(cluster_name='cluster_name')\n updates = {'status': 'error'}\n self.volume.finish_failover(self.context, svc, updates)\n completed_mock.assert_called_once_with(self.context, svc, updates)\n svc.cluster.status = 'error'\n svc.cluster.save.assert_called_once()\n\n @ddt.data(None, 'cluster_name')\n @mock.patch('cinder.volume.manager.VolumeManager.finish_failover')\n @mock.patch('cinder.volume.manager.VolumeManager._get_my_volumes')\n def test_failover_manager(self, cluster, get_vols_mock, finish_mock):\n \"\"\"Test manager's failover method for clustered and not clustered.\"\"\"\n rep_field = 
fields.ReplicationStatus\n svc = objects.Service(self.context, host=self.volume.host,\n binary=constants.VOLUME_BINARY,\n cluster_name=cluster,\n replication_status=rep_field.ENABLED)\n svc.create()\n\n vol = objects.Volume(self.context, host=self.volume.host)\n vol.create()\n\n get_vols_mock.return_value = [vol]\n\n with mock.patch.object(self.volume, 'driver') as driver:\n called, not_called = driver.failover_host, driver.failover\n if cluster:\n called, not_called = not_called, called\n\n called.return_value = ('secondary', [{'volume_id': vol.id,\n 'updates': {'status': 'error'}}])\n\n self.volume.failover(self.context,\n secondary_backend_id='secondary')\n\n not_called.assert_not_called()\n called.assert_called_once_with(self.context, [vol],\n secondary_id='secondary')\n\n expected_update = {'replication_status': rep_field.FAILED_OVER,\n 'active_backend_id': 'secondary',\n 'disabled': True,\n 'disabled_reason': 'failed-over'}\n finish_mock.assert_called_once_with(self.context, svc, expected_update)\n\n volume = objects.Volume.get_by_id(self.context, vol.id)\n self.assertEqual('error', volume.status)\n\n @ddt.data(('host1', None), (None, 'mycluster'))\n @ddt.unpack\n def test_failover_api_fail_multiple_results(self, host, cluster):\n \"\"\"Fail if we try to failover multiple backends in the same request.\"\"\"\n rep_field = fields.ReplicationStatus\n clusters = [\n objects.Cluster(self.context,\n name='mycluster@backend1',\n replication_status=rep_field.ENABLED,\n binary=constants.VOLUME_BINARY),\n objects.Cluster(self.context,\n name='mycluster@backend2',\n replication_status=rep_field.ENABLED,\n binary=constants.VOLUME_BINARY)\n ]\n clusters[0].create()\n clusters[1].create()\n services = [\n objects.Service(self.context, host='host1@backend1',\n cluster_name=clusters[0].name,\n replication_status=rep_field.ENABLED,\n binary=constants.VOLUME_BINARY),\n objects.Service(self.context, host='host1@backend2',\n cluster_name=clusters[1].name,\n replication_status=rep_field.ENABLED,\n binary=constants.VOLUME_BINARY),\n ]\n services[0].create()\n services[1].create()\n self.assertRaises(exception.Invalid,\n self.volume_api.failover, self.context, host,\n cluster)\n\n def test_failover_api_not_found(self):\n self.assertRaises(exception.ServiceNotFound, self.volume_api.failover,\n self.context, 'host1', None)\n\n @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover')\n def test_failover_api_success_multiple_results(self, failover_mock):\n \"\"\"Succeed to failover multiple services for the same backend.\"\"\"\n rep_field = fields.ReplicationStatus\n cluster_name = 'mycluster@backend1'\n cluster = objects.Cluster(self.context,\n name=cluster_name,\n replication_status=rep_field.ENABLED,\n binary=constants.VOLUME_BINARY)\n cluster.create()\n services = [\n objects.Service(self.context, host='host1@backend1',\n cluster_name=cluster_name,\n replication_status=rep_field.ENABLED,\n binary=constants.VOLUME_BINARY),\n objects.Service(self.context, host='host2@backend1',\n cluster_name=cluster_name,\n replication_status=rep_field.ENABLED,\n binary=constants.VOLUME_BINARY),\n ]\n services[0].create()\n services[1].create()\n\n self.volume_api.failover(self.context, None, cluster_name,\n mock.sentinel.secondary_id)\n\n for service in services + [cluster]:\n self.assertEqual(rep_field.ENABLED, service.replication_status)\n service.refresh()\n self.assertEqual(rep_field.FAILING_OVER,\n service.replication_status)\n\n failover_mock.assert_called_once_with(self.context, mock.ANY,\n 
mock.sentinel.secondary_id)\n self.assertEqual(services[0].id, failover_mock.call_args[0][1].id)\n\n @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover')\n def test_failover_api_success_multiple_results_not_updated(self,\n failover_mock):\n \"\"\"Succeed to failover even if a service is not updated.\"\"\"\n rep_field = fields.ReplicationStatus\n cluster_name = 'mycluster@backend1'\n cluster = objects.Cluster(self.context,\n name=cluster_name,\n replication_status=rep_field.ENABLED,\n binary=constants.VOLUME_BINARY)\n cluster.create()\n services = [\n objects.Service(self.context, host='host1@backend1',\n cluster_name=cluster_name,\n replication_status=rep_field.ENABLED,\n binary=constants.VOLUME_BINARY),\n objects.Service(self.context, host='host2@backend1',\n cluster_name=cluster_name,\n replication_status=rep_field.ERROR,\n binary=constants.VOLUME_BINARY),\n ]\n services[0].create()\n services[1].create()\n\n self.volume_api.failover(self.context, None, cluster_name,\n mock.sentinel.secondary_id)\n\n for service in services[:1] + [cluster]:\n service.refresh()\n self.assertEqual(rep_field.FAILING_OVER,\n service.replication_status)\n\n services[1].refresh()\n self.assertEqual(rep_field.ERROR, services[1].replication_status)\n\n failover_mock.assert_called_once_with(self.context, mock.ANY,\n mock.sentinel.secondary_id)\n self.assertEqual(services[0].id, failover_mock.call_args[0][1].id)\n\n @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover')\n def test_failover_api_fail_multiple_results_not_updated(self,\n failover_mock):\n \"\"\"Fail if none of the services could be updated.\"\"\"\n rep_field = fields.ReplicationStatus\n cluster_name = 'mycluster@backend1'\n cluster = objects.Cluster(self.context,\n name=cluster_name,\n replication_status=rep_field.ENABLED,\n binary=constants.VOLUME_BINARY)\n cluster.create()\n down_time = timeutils.datetime.datetime(1970, 1, 1)\n services = [\n # This service is down\n objects.Service(self.context, host='host1@backend1',\n cluster_name=cluster_name,\n replication_status=rep_field.ENABLED,\n created_at=down_time,\n updated_at=down_time,\n modified_at=down_time,\n binary=constants.VOLUME_BINARY),\n # This service is not with the right replication status\n objects.Service(self.context, host='host2@backend1',\n cluster_name=cluster_name,\n replication_status=rep_field.ERROR,\n binary=constants.VOLUME_BINARY),\n ]\n services[0].create()\n services[1].create()\n\n self.assertRaises(exception.InvalidInput,\n self.volume_api.failover, self.context, None,\n cluster_name, mock.sentinel.secondary_id)\n\n for service in services:\n svc = objects.Service.get_by_id(self.context, service.id)\n self.assertEqual(service.replication_status,\n svc.replication_status)\n\n cluster.refresh()\n self.assertEqual(rep_field.ENABLED, cluster.replication_status)\n\n failover_mock.assert_not_called()\n","sub_path":"cinder/tests/unit/volume/test_replication_manager.py","file_name":"test_replication_manager.py","file_ext":"py","file_size_in_byte":20441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"169691347","text":"def solution(begin, target, words):\n answer_list = []\n\n def dfs(w,count,visit):\n if w == begin:\n answer_list.append(count)\n return\n j = 0\n for b in words:\n same_count = 0\n for i in range(len(b)):\n if b[i] == w[i]:\n same_count += 1\n\n if same_count == len(begin) - 1 and visit[j] == False:\n visit[j] = True\n dfs(b,count+1,visit)\n visit[j] = False\n \n j += 1\n\n words.append(begin)\n v = [ False 
for _ in range(len(words))]\n    if target not in words:\n        return 0\n\n    dfs(target, 0, v)\n    # guard against an empty answer_list when no transformation sequence exists\n    return min(answer_list) if answer_list else 0\n\n\nprint(solution(\"hit\",\"cog\",[\"hot\", \"dot\", \"dog\", \"lot\", \"log\", \"cog\"]))\nprint(solution(\"hit\",\"cog\",[\"hot\", \"dot\", \"dog\", \"lot\", \"log\"]))","sub_path":"Programmers/Programmers_코딩테스트 연습_DFS,BFS_단어 변환.py","file_name":"Programmers_코딩테스트 연습_DFS,BFS_단어 변환.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"237790677","text":"import tensorflow as tf\nimport numpy as np\nimport csv\n\n\n_PAD_, _SOS_, _EOS_, _UNK_ = 0, 1, 2, 3\n\n# Problem\n# Write a function that reads the vocabulary file and returns it.\ndef read_vocab():\n    f = open('chat/vocab.txt', 'r', encoding='utf-8')\n    vocab = f.readlines()\n    # print(len(vocab))   # 164\n    f.close()\n\n    return vocab\n\n# Problem\n# Write a function that reads vectors.txt (the dialog converted to numbers) and returns it.\ndef read_vectors():\n    f = open('chat/vectors.txt', 'r', encoding='utf-8')\n    vectors = [[int(col) for col in row] for row in csv.reader(f)]\n    # print(vectors[:3])  # [[105], [105], [114, 128, 85, 79]]\n    f.close()\n\n    return vectors\n\ndef add_pad(seq, max_len):\n    seq_len = len(seq)\n    if seq_len > max_len:\n        return seq[:max_len]\n\n    return seq + [_PAD_] * (max_len - seq_len)\n\ndef load_dataset():\n    vocab = read_vocab()\n    vectors = read_vectors()\n\n    # Problem\n    # Find the length of the longest token sequence in the question and answer data.\n    dialog_questions = vectors[::2]\n    dialog_answers = vectors[1::2]\n\n    max_len_q = max([len(q) for q in dialog_questions])\n    max_len_a = max([len(a) for a in dialog_answers]) + 1\n    print(max_len_q, max_len_a)     # 9 10\n\n    # ------------------------------------------------ #\n    # make_xy function taken from 33_2 translation_word.py\n\n    # Problem\n    # Fix the error that occurs in the code below.\n\n    onehot = np.eye(len(vocab), dtype=np.float32)\n\n    enc_inputs, dec_inputs, dec_output = [], [], []\n    for question, answer in zip(dialog_questions, dialog_answers):\n        encoder_in = add_pad(question, max_len_q)   # lengths must be padded to match\n        decoder_in = add_pad([_SOS_] + answer, max_len_a)\n        target = add_pad(answer + [_EOS_], max_len_a)\n        # print(eng_incoder)\n\n        enc_inputs.append(onehot[encoder_in])\n        dec_inputs.append(onehot[decoder_in])\n        # dec_output.append(onehot[target])\n        dec_output.append(target)       # better than the line above: keep y as integer indices instead of one-hot\n\n    return np.float32(enc_inputs), np.float32(dec_inputs), np.float32(dec_output), vocab\n\n\n# Problem\n# Build a chatbot model from the Little Prince dataset (including training via fit).\n\ndef train_and_save_chatbot():\n    enc_inputs, dec_inputs, dec_output, vocab = load_dataset()\n    print(enc_inputs.shape, dec_inputs.shape, dec_output.shape)    # (52, 9, 164) (52, 10, 164) (52, 10)\n\n    # ---------------------------------------------- #\n    # show_translation_word function reused without modification; works without errors.\n    n_classes, n_hiddens = len(vocab), 128\n\n    # encoder\n    enc_layer = tf.keras.layers.Input(shape=enc_inputs.shape[1:])\n    _enc_output, enc_state = tf.keras.layers.SimpleRNN(n_hiddens, return_state=True)(enc_layer)\n\n    # decoder\n    dec_layer = tf.keras.layers.Input(shape=dec_inputs.shape[1:])\n    output = tf.keras.layers.SimpleRNN(n_hiddens, return_sequences=True)(dec_layer, initial_state=enc_state)    # decoder output state unused; it receives the encoder state via initial_state\n\n    output = tf.keras.layers.Dense(n_classes, activation='softmax')(output)\n\n    model = tf.keras.Model([enc_layer, dec_layer], output)\n\n    model.compile(optimizer='adam',\n                  loss=tf.keras.losses.sparse_categorical_crossentropy)\n\n    model.fit([enc_inputs, dec_inputs], dec_output, epochs=500, verbose=2)\n\n    model.save('chat/little_prince.h5')\n\ntrain_and_save_chatbot()\n","sub_path":"Keras - DeepLearning/34_1. chatbot_model.py","file_name":"34_1. chatbot_model.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"272467304","text":"import cv2\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nPATH = 'imagens/frame0.jpg'\n\nimg = cv2.imread(PATH, cv2.IMREAD_GRAYSCALE)\nplt.imshow(img, cmap='gray')\ncircles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20, param1=130, param2=30, minRadius=0, maxRadius=0)\nif circles is not None:\n    for x, y, r in circles[0]:\n        c = plt.Circle((x, y), r, fill=False, lw=3, ec='C1')\n        plt.gca().add_patch(c)\nplt.gcf().set_size_inches((12, 8))\nplt.show()","sub_path":"teste1.py","file_name":"teste1.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"211347439","text":"\"\"\"Unit tests for pyatv.conf.\"\"\"\n\nimport unittest\nfrom unittest.mock import MagicMock\n\nfrom pyatv import (conf, const, exceptions)\nfrom pyatv.const import Protocol\n\nADDRESS_1 = '127.0.0.1'\nADDRESS_2 = '192.168.0.1'\nNAME = 'Alice'\nPORT_1 = 1234\nPORT_2 = 5678\nIDENTIFIER_1 = 'id1'\nIDENTIFIER_2 = 'id2'\nCREDENTIALS_1 = 'cred1'\n\n\nclass ConfTest(unittest.TestCase):\n\n    def setUp(self):\n        self.config = conf.AppleTV(ADDRESS_1, NAME)\n        self.service_mock = MagicMock()\n        self.service_mock.protocol = const.Protocol.DMAP\n        self.service_mock.port = PORT_1\n        self.service_mock.identifier = IDENTIFIER_1\n        self.service_mock.credentials = None\n\n        self.service_mock2 = MagicMock()\n        self.service_mock2.protocol = const.Protocol.MRP\n        self.service_mock2.port = PORT_2\n        self.service_mock2.identifier = IDENTIFIER_2\n\n        self.airplay_mock = MagicMock()\n        self.airplay_mock.port = PORT_1\n        self.airplay_mock.protocol = Protocol.AirPlay\n\n    def test_address_and_name(self):\n        self.assertEqual(self.config.address, ADDRESS_1)\n        self.assertEqual(self.config.name, NAME)\n\n    def test_equality(self):\n        self.assertEqual(self.config, self.config)\n\n        atv2 = conf.AppleTV(ADDRESS_1, NAME)\n        atv2.add_service(conf.AirPlayService(IDENTIFIER_1, PORT_1))\n        self.assertNotEqual(self.config, atv2)\n\n    def test_add_services_and_get(self):\n        self.config.add_service(self.service_mock)\n        self.config.add_service(self.service_mock2)\n\n        services = self.config.services\n        self.assertEqual(len(services), 3)\n\n        self.assertIn(self.service_mock, services)\n        self.assertIn(self.service_mock2, services)\n\n        self.assertEqual(\n            self.config.get_service(Protocol.DMAP), self.service_mock)\n        self.assertEqual(\n            self.config.get_service(Protocol.MRP), self.service_mock2)\n        
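# Editor's note: the third service counted above is the implicit default AirPlay\n        # service that conf.AppleTV always provides (see test_default_airplay_service\n        # below), which is why len(services) is 3 although only two were added explicitly.\n        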
self.assertIsNotNone(self.config.get_service(Protocol.AirPlay))\n\n def test_identifier(self):\n self.assertIsNone(self.config.identifier)\n\n self.config.add_service(self.service_mock)\n self.assertEqual(self.config.identifier, IDENTIFIER_1)\n\n self.config.add_service(self.service_mock2)\n self.assertEqual(self.config.identifier, IDENTIFIER_1)\n\n services = self.config.services\n self.assertEqual(len(services), 3)\n self.assertIn(self.service_mock, services)\n self.assertIn(self.service_mock2, services)\n\n def test_default_airplay_service(self):\n airplay = self.config.get_service(Protocol.AirPlay)\n self.assertEqual(airplay.protocol, Protocol.AirPlay)\n self.assertEqual(airplay.port, 7000)\n\n def test_add_airplay_service(self):\n self.config.add_service(self.airplay_mock)\n\n airplay = self.config.get_service(Protocol.AirPlay)\n self.assertEqual(airplay.protocol, Protocol.AirPlay)\n self.assertEqual(airplay.port, PORT_1)\n\n def test_main_service_no_service(self):\n with self.assertRaises(exceptions.NoServiceError):\n self.config.main_service()\n\n def test_main_service_airplay_no_service(self):\n self.config.add_service(self.airplay_mock)\n with self.assertRaises(exceptions.NoServiceError):\n self.config.main_service()\n\n def test_main_service_get_service(self):\n self.config.add_service(self.service_mock)\n self.assertEqual(self.config.main_service(), self.service_mock)\n\n self.config.add_service(self.service_mock2)\n self.assertEqual(self.config.main_service(), self.service_mock2)\n\n def test_main_service_override_protocol(self):\n self.config.add_service(self.service_mock)\n self.config.add_service(self.service_mock2)\n self.assertEqual(\n self.config.main_service(protocol=self.service_mock.protocol),\n self.service_mock)\n\n def test_set_credentials_for_missing_service(self):\n self.assertFalse(self.config.set_credentials(Protocol.DMAP, 'dummy'))\n\n def test_set_credentials(self):\n self.config.add_service(self.service_mock)\n self.assertIsNone(self.config.get_service(Protocol.DMAP).credentials)\n\n self.config.set_credentials(Protocol.DMAP, 'dummy')\n self.assertEqual(\n self.config.get_service(Protocol.DMAP).credentials, 'dummy')\n\n # This test is a bit strange and couples to protocol specific services,\n # but it's mainly to exercise string as that is important. Might refactor\n # this in the future.\n def test_to_str(self):\n self.config.add_service(conf.DmapService(IDENTIFIER_1, 'LOGIN_ID'))\n self.config.add_service(conf.MrpService(IDENTIFIER_2, PORT_2))\n\n # Check for some keywords to not lock up format too much\n output = str(self.config)\n self.assertIn(ADDRESS_1, output)\n self.assertIn(NAME, output)\n self.assertIn('LOGIN_ID', output)\n self.assertIn(str(PORT_2), output)\n self.assertIn('3689', output)\n","sub_path":"tests/test_conf.py","file_name":"test_conf.py","file_ext":"py","file_size_in_byte":4998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"623199246","text":"\"\"\"\nCopyright (c) 2017 Red Hat, Inc\nAll rights reserved.\n\nThis software may be modified and distributed under the terms\nof the BSD license. 
See the LICENSE file for details.\n\"\"\"\n\nfrom flexmock import flexmock\n\nimport json\nimport responses\nimport os\nimport pytest\nimport six\nfrom six.moves.urllib.parse import urlparse, parse_qs\n\nfrom atomic_reactor.inner import DockerBuildWorkflow\ntry:\n from atomic_reactor.plugins.pre_resolve_module_compose import (ResolveModuleComposePlugin,\n get_compose_info)\n MODULEMD_AVAILABLE = True\nexcept ImportError:\n MODULEMD_AVAILABLE = False\n\nfrom atomic_reactor.plugin import PreBuildPluginsRunner, PluginFailedException\nfrom atomic_reactor.plugins.pre_reactor_config import (ReactorConfigPlugin,\n WORKSPACE_CONF_KEY,\n ReactorConfig)\nfrom atomic_reactor.source import VcsInfo, SourceConfig\nfrom atomic_reactor.util import ImageName\nfrom atomic_reactor.constants import REPO_CONTAINER_CONFIG\n\nfrom tests.constants import (MOCK_SOURCE, FLATPAK_GIT, FLATPAK_SHA1)\nfrom tests.fixtures import docker_tasker, reactor_config_map # noqa\nfrom tests.flatpak import FLATPAK_APP_MODULEMD, FLATPAK_APP_RPMS\nfrom tests.retry_mock import mock_get_retry_session\n\n\nclass MockSource(object):\n def __init__(self, tmpdir):\n tmpdir = str(tmpdir)\n self.dockerfile_path = \"./\"\n self.path = tmpdir\n self._config = None\n\n self.container_yaml_path = os.path.join(tmpdir, 'container.yaml')\n\n def get_build_file_path(self):\n return self.container_yaml_path, self.path\n\n def get_vcs_info(self):\n return VcsInfo('git', FLATPAK_GIT, FLATPAK_SHA1)\n\n @property\n def config(self): # lazy load after container.yaml has been created\n self._config = self._config or SourceConfig(self.path)\n return self._config\n\n\nclass MockBuilder(object):\n def __init__(self):\n self.image_id = \"xxx\"\n self.base_image = ImageName.parse(\"org.gnome.eog\")\n\n def set_base_image(self, base_image):\n pass\n\n def set_df_path(self, path):\n self.df_path = path\n\n\ndef mock_workflow(tmpdir):\n workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')\n mock_source = MockSource(tmpdir)\n setattr(workflow, 'builder', MockBuilder())\n workflow.builder.source = mock_source\n flexmock(workflow, source=mock_source)\n\n setattr(workflow.builder, 'df_dir', str(tmpdir))\n\n return workflow\n\n\nMODULE_NAME = 'eog'\nMODULE_STREAM = 'f26'\nMODULE_VERSION = \"20170629213428\"\nMODULE_NS = MODULE_NAME + ':' + MODULE_STREAM\nMODULE_NSV = MODULE_NS + ':' + MODULE_VERSION\n\nODCS_URL = 'https://odcs.fedoraproject.org/odcs/1'\n\nPDC_URL = 'https://pdc.fedoraproject.org/rest_api/v1'\nLATEST_VERSION_JSON = [{\"modulemd\": FLATPAK_APP_MODULEMD,\n \"rpms\": FLATPAK_APP_RPMS}]\n\n\ndef compose_json(state, state_name):\n return json.dumps({\n \"flags\": [],\n \"id\": 84,\n \"owner\": \"Unknown\",\n \"result_repo\": \"http://odcs.fedoraproject.org/composes/latest-odcs-84-1/compose/Temporary\",\n \"source\": MODULE_NSV,\n \"source_type\": 2,\n \"state\": state,\n \"state_name\": state_name\n })\n\n\n@responses.activate # noqa - docker_tasker fixture\n@pytest.mark.skipif(not MODULEMD_AVAILABLE,\n reason=\"libmodulemd not available\")\n@pytest.mark.parametrize('compose_ids', (None, [], [84], [84, 2]))\n@pytest.mark.parametrize('modules', (\n None,\n [],\n [MODULE_NS],\n [MODULE_NSV],\n [MODULE_NSV, 'mod_name2-mod_stream2-mod_version2'],\n))\ndef test_resolve_module_compose(tmpdir, docker_tasker, compose_ids, modules, # noqa\n reactor_config_map):\n secrets_path = os.path.join(str(tmpdir), \"secret\")\n os.mkdir(secrets_path)\n with open(os.path.join(secrets_path, \"token\"), \"w\") as f:\n f.write(\"green_eggs_and_ham\")\n\n if modules is not 
None:\n data = \"compose:\\n\"\n data += \" modules:\\n\"\n for mod in modules:\n data += \" - {}\\n\".format(mod)\n tmpdir.join(REPO_CONTAINER_CONFIG).write(data)\n\n module = None\n if modules:\n module = modules[0]\n\n workflow = mock_workflow(tmpdir)\n mock_get_retry_session()\n\n def handle_composes_post(request):\n assert request.headers['Authorization'] == 'Bearer green_eggs_and_ham'\n\n if isinstance(request.body, six.text_type):\n body = request.body\n else:\n body = request.body.decode()\n body_json = json.loads(body)\n assert body_json['source']['type'] == 'module'\n assert body_json['source']['source'] == module\n return (200, {}, compose_json(0, 'wait'))\n\n responses.add_callback(responses.POST, ODCS_URL + '/composes/',\n content_type='application/json',\n callback=handle_composes_post)\n\n state = {'count': 1}\n\n def handle_composes_get(request):\n assert request.headers['Authorization'] == 'Bearer green_eggs_and_ham'\n\n if state['count'] == 1:\n response_json = compose_json(1, 'generating')\n else:\n response_json = compose_json(2, 'done')\n state['count'] += 1\n\n return (200, {}, response_json)\n\n responses.add_callback(responses.GET, ODCS_URL + '/composes/84',\n content_type='application/json',\n callback=handle_composes_get)\n\n def handle_unreleasedvariants(request):\n query = parse_qs(urlparse(request.url).query)\n\n assert query['variant_id'] == [MODULE_NAME]\n assert query['variant_version'] == [MODULE_STREAM]\n assert query['variant_release'] == [MODULE_VERSION]\n\n return (200, {}, json.dumps(LATEST_VERSION_JSON))\n\n responses.add_callback(responses.GET, PDC_URL + '/unreleasedvariants/',\n content_type='application/json',\n callback=handle_unreleasedvariants)\n\n args = {\n 'odcs_url': ODCS_URL,\n 'odcs_openidc_secret_path': secrets_path,\n 'pdc_url': PDC_URL,\n 'compose_ids': compose_ids\n }\n\n if reactor_config_map:\n workflow.plugin_workspace[ReactorConfigPlugin.key] = {}\n workflow.plugin_workspace[ReactorConfigPlugin.key][WORKSPACE_CONF_KEY] =\\\n ReactorConfig({'version': 1,\n 'odcs': {'api_url': ODCS_URL,\n 'auth': {'openidc_dir': secrets_path}},\n 'pdc': {'api_url': PDC_URL}})\n\n runner = PreBuildPluginsRunner(\n docker_tasker,\n workflow,\n [{\n 'name': ResolveModuleComposePlugin.key,\n 'args': args\n }]\n )\n\n if modules is None:\n with pytest.raises(PluginFailedException) as exc_info:\n runner.run()\n assert '\"compose\" config in container.yaml is required ' in str(exc_info.value)\n elif not modules:\n with pytest.raises(PluginFailedException) as exc_info:\n runner.run()\n assert '\"compose\" config has no modules' in str(exc_info.value)\n else:\n runner.run()\n\n compose_info = get_compose_info(workflow)\n\n assert compose_info.compose_id == 84\n assert compose_info.base_module.name == MODULE_NAME\n assert compose_info.base_module.stream == MODULE_STREAM\n assert compose_info.base_module.version == MODULE_VERSION\n assert compose_info.base_module.mmd.props.summary == 'Eye of GNOME Application Module'\n","sub_path":"tests/plugins/test_resolve_module_compose.py","file_name":"test_resolve_module_compose.py","file_ext":"py","file_size_in_byte":7617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"594322652","text":"from models.DiscordExtension import DiscordExtension\nfrom models.ExecutionContext import ExecutionContext\nfrom player import Player\nimport asyncio\nimport discord\nimport logging\n\nclass AliasExtension(DiscordExtension):\n def __init__(self, configRepo):\n self.configRepo = 
configRepo\n        super().__init__()\n\n    @property\n    def name(self):\n        return 'Aliases'\n\n    def isserving(self, ctx: ExecutionContext):\n        return ctx.cmd in ['alias', 'alias-remove']\n\n    async def execute(self, ctx: ExecutionContext):\n        cmd = ctx.cmd\n        arg = ctx.arg\n\n        if cmd == 'alias':\n            separator = arg.find(' ')\n            if separator == -1:\n                await ctx.send_message('Wrong alias syntax. Use \"alias \"')\n                return\n            alias = arg[:separator].strip()\n            replacer = arg[separator + 1:].strip()\n            self.configRepo.add_alias(alias, replacer)\n            await ctx.send_message(f'Alias \"{alias}\" has been successfully added')\n        else:\n            if ctx.isadmin:\n                self.configRepo.remove_alias(arg)\n                await ctx.send_message(f'Alias \"{ctx.arg}\" was successfully removed')\n            else:\n                await ctx.send_message('Only admin users can remove aliases')\n\n    def list_commands(self, ctx: ExecutionContext):\n        array = ['alias ']\n        aliases = 'list: '\n        for alias in self.configRepo.get_aliases():\n            aliases += f' {alias[0]}'\n        array.append(aliases)\n        array.append('alias-remove ')\n        return array\n\n    def list_emojis(self):\n        return []\n\n    def emoji_to_command(self, emoji: str):\n        return None\n\n    async def initialize(self, bot):\n        pass\n\n    def dispose(self):\n        pass","sub_path":"discord_bot/extensions/AliasExtension.py","file_name":"AliasExtension.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"582707723","text":"def L(a):\n    # read `a` grades from the user and return them as a list\n    list1 = []\n    for i in range(a):\n        grade = int(input(\"enter grade: \"))\n        list1.append(grade)\n    return list1\n\ndef A(b):\n    # arithmetic mean of the collected grades\n    return sum(b) / len(b)\n\nnum = int(input(\"enter number of students: \"))\n\ngrades = L(num)\nprint(grades)\nprint(A(grades))","sub_path":"project2-master/Functionals/EXercices/Ex_14+15.py","file_name":"Ex_14+15.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"166205018","text":"from model import Car\nimport view\n# Program entry point: start() asks the user for input and dispatches to the matching view.\n\n\ndef showAll():\n    cars_in_file = Car.getAllCars()\n    return view.showAllCarsView(cars_in_file)\n\ndef start():\n    view.startView()\n    testinput = input()\n    if testinput == 'y':\n        return showAll()\n    else:\n        return view.endView()\n\n\nif __name__ == \"__main__\":\n    # running controller function\n    start()","sub_path":"project/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"606299547","text":"import hashlib\nimport sys\nimport os\nimport mimetypes\n\n\ndef getmd5(s):\n    # pick text mode for plain-text files, binary mode otherwise\n    mime = mimetypes.guess_type(s)\n    if mime[0] == 'text/plain':\n        mode = 'r'\n    else:\n        mode = 'rb'\n\n    m = hashlib.md5()\n    buffsize = 65536\n    with open(s, mode) as f:\n        while True:\n            x = f.read(buffsize)\n            if not x:\n                break\n            if isinstance(x, str):\n                x = x.encode('utf-8')\n            m.update(x)\n    return m.hexdigest()\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) != 2:\n        # exactly one file argument is supported\n        print('only support 1 file')\n    else:\n        print(\"file:\t\" + os.path.abspath(sys.argv[1]))\n        print(\"md5:\t\" + getmd5(sys.argv[1]))\n","sub_path":"MD5.py","file_name":"MD5.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"74998322","text":"\"\"\"\n295\nfind median from data stream\nhard\n\"\"\"\n\nfrom 
heapq import *\n\n\nclass MedianFinder:\n\n # max heap and min heap\n\n def __init__(self):\n \"\"\"\n initialize your data structure here.\n \"\"\"\n self.hi = []\n self.lo = []\n\n def addNum(self, num: int) -> None:\n heappush(self.lo, -heappushpop(self.hi, num))\n while len(self.lo) > len(self.hi):\n heappush(self.hi, -heappop(self.lo))\n\n\n def findMedian(self) -> float:\n if len(self.hi) > len(self.lo):\n return self.hi[0]\n if len(self.hi) == len(self.lo):\n return (self.hi[0] - self.lo[0]) / 2.0\n\n\n\n\nsol = MedianFinder()\nsol.addNum(1)\nprint(sol.findMedian())\nsol.addNum(2)\nprint(sol.findMedian())","sub_path":"Q295-v2.py","file_name":"Q295-v2.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"147208182","text":"import requests, os,pymongo\nimport datetime\nfrom pymongo import MongoClient\nfrom bs4 import BeautifulSoup\n\nclient = MongoClient('localhost', 27017)\ndb = client['comic_downloader']\n\ncomicurls = db.urls\n\n\n\nurl = 'http://xkcd.com'\nos.makedirs('xkcd', exist_ok=True)\n\nwhile not url.endswith('#'):\n print('downloading page...%s' % url)\n res = requests.get(url)\n res.raise_for_status()\n soup = BeautifulSoup(res.text, 'html.parser')\n\n comicElems = soup.select('#comic img')\n if comicElems == []:\n print(\"there's no content here \")\n else:\n comicUrl = comicElems[0].get('src')\n urls = {'image_link': comicUrl, 'date':datetime.datetime.utcnow() }\n print('downloading the image %s...' %(comicUrl))\n res = requests.get('http:'+comicUrl)\n res.raise_for_status()\n\n imagesUrl = comicurls.insert_one(urls)\n\n imageFile = open(os.path.join('xkcd', os.path.basename(comicUrl)), 'wb')\n for chunk in res.iter_content(100000):\n imageFile.write(chunk)\n imageFile.close()\n\n prevLink = soup.select('a[rel=\"prev\"]')[0]\n url = 'http://xkcd.com' + prevLink.get('href')\nprint('done!!')\n","sub_path":"Web Scrape/Xckd.py","file_name":"Xckd.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"508906366","text":"#Copyright 2018 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n#PDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-rekognition-developer-guide/blob/master/LICENSE-SAMPLECODE.)\n\nimport boto3\nimport json\n\ndef add_faces_to_collection(bucket,photo,collection_id):\n\n\n \n client=boto3.client('rekognition')\n\n response=client.index_faces(CollectionId=collection_id,\n Image={'S3Object':{'Bucket':bucket,'Name':photo}},\n ExternalImageId=photo,\n MaxFaces=6,\n QualityFilter=\"AUTO\",\n DetectionAttributes=['ALL'])\n with open(str(photo)+'.json','w') as outfile:\n json.dump(response,outfile)\n \n\n print ('Results for ' + photo) \t\n print('Faces indexed:')\t\t\t\t\t\t\n for faceRecord in response['FaceRecords']:\n print(' Face ID: ' + faceRecord['Face']['FaceId'])\n print(' Location: {}'.format(faceRecord['Face']['BoundingBox']))\n\n print('Faces not indexed:')\n for unindexedFace in response['UnindexedFaces']:\n print(' Location: {}'.format(unindexedFace['FaceDetail']['BoundingBox']))\n print(' Reasons:')\n for reason in unindexedFace['Reasons']:\n print(' ' + reason)\n return len(response['FaceRecords'])\n\ndef main():\n bucket='feiscollec'\n collection_id='CFMFC'\n photo='C01P8FC18.png'\n \n \n indexed_faces_count=add_faces_to_collection(bucket, photo, collection_id)\n print(\"Faces indexed count: \" + str(indexed_faces_count))\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"GetAWS/AWSCollecAdd.py","file_name":"AWSCollecAdd.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"187746562","text":"import tensorflow as tf\n\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_float('learning_rate', 1e-3, 'learning rate to train')\ntf.app.flags.DEFINE_float('gamma', 0.9, 'gamma')\n\n\nclass DQN(object):\n def __init__(self, input_shape, inference_fn, num_actions):\n self.input_shape = input_shape\n\n with tf.name_scope('inputs'):\n self._setup_inputs()\n\n self.inference = inference_fn\n q = self.inference(self.state, 'dqn')\n o = tf.one_hot(self.action, num_actions)\n mask_q = tf.reduce_sum(o * q, axis=1)\n self.q_values = q\n self.best_actions = tf.argmax(q, axis=1)\n\n tf.summary.scalar('q', tf.reduce_mean(mask_q))\n\n next_q = self.inference(self.next_state, 'target_dqn')\n target_q = tf.stop_gradient(tf.reduce_max(next_q, axis=1))\n target = tf.clip_by_value(self.reward, -1, 1) + \\\n FLAGS.gamma * tf.cast(tf.logical_not(self.done), tf.float32) * target_q\n\n tf.summary.scalar('target_q', tf.reduce_mean(target))\n\n with tf.name_scope('copy'):\n v1 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'dqn')\n v2 = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, 'target_dqn')\n self.copy = []\n for i in range(len(v1)):\n self.copy.append(tf.assign(v2[i], v1[i]))\n\n # with tf.name_scope('loss'):\n # self.loss = tf.reduce_mean(tf.square(target - mask_q))\n self.loss = tf.losses.huber_loss(target, mask_q, scope='loss',\n reduction=tf.losses.Reduction.MEAN)\n tf.summary.scalar('loss', self.loss)\n\n with tf.name_scope('optimization'):\n optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)\n self.train_ops = optimizer.minimize(self.loss)\n\n with tf.name_scope('summary'):\n self.summary = tf.summary.merge_all()\n\n def _setup_inputs(self):\n self.state = tf.placeholder(dtype=tf.float32,\n shape=[None] + self.input_shape, name='state')\n self.next_state = tf.placeholder(dtype=tf.float32,\n shape=[None] + self.input_shape, name='next_state')\n self.action = tf.placeholder(dtype=tf.int32, shape=[None], 
name='action')\n self.reward = tf.placeholder(dtype=tf.float32, shape=[None], name='reward')\n self.done = tf.placeholder(dtype=tf.bool, shape=[None], name='done')\n\n\ndef atari_model(inputs, name):\n def act(inputs):\n return tf.nn.leaky_relu(inputs, alpha=0.01)\n\n with tf.variable_scope(name):\n initializer = tf.variance_scaling_initializer()\n with tf.name_scope('conv1'):\n conv = tf.contrib.layers.conv2d(inputs, 32, stride=4, kernel_size=8,\n activation_fn=act, padding='SAME', weights_initializer=initializer)\n\n with tf.name_scope('conv2'):\n conv = tf.contrib.layers.conv2d(conv, 64, stride=2, kernel_size=4,\n activation_fn=act, padding='SAME', weights_initializer=initializer)\n\n with tf.name_scope('conv3'):\n conv = tf.contrib.layers.conv2d(conv, 64, stride=1, kernel_size=3,\n activation_fn=act, padding='SAME', weights_initializer=initializer)\n\n with tf.name_scope('fully_connected'):\n flatten = tf.contrib.layers.flatten(conv)\n fc = tf.contrib.layers.fully_connected(flatten, 512,\n activation_fn=act, weights_initializer=initializer)\n\n with tf.name_scope('output'):\n w = tf.get_variable(name + 'ow', shape=[512, 4],\n initializer=initializer)\n b = tf.get_variable(name + 'ob', shape=[4],\n initializer=tf.zeros_initializer())\n outputs = tf.add(tf.matmul(fc, w), b, name='q_values')\n return outputs\n\n\ndef test():\n DQN([84, 84, 4], atari_model, 4)\n\n\nif __name__ == '__main__':\n test()\n","sub_path":"rl/dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":3519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"433341968","text":"#! /usr/bin/env python3\n# __author__ = 'fuhengda'\n\nimport gevent\nimport socket\nfrom gevent import monkey\nmonkey.patch_all()\n\n\ndef handle(conn):\n try:\n while True:\n data = conn.recv(1024)\n lendata = len(data)\n print(data.decode())\n\n if lendata != 0:\n conn.send(data)\n\n else:\n conn.send('null')\n\n except Exception as e:\n print(e)\n\n finally:\n conn.close()\n\n\ndef server(port):\n s = socket.socket()\n s.bind(('127.0.0.1', port))\n s.listen(5)\n print('start listening !')\n\n while True:\n client, addr = s.accept()\n print(client, addr)\n gevent.spawn(handle, client)\n '''\n gevent.spawn:\n Create a new :class:`Greenlet` object and schedule it to run ``function(*args, **kwargs)``.\n This can be used as ``gevent.spawn`` or ``Greenlet.spawn``.\n '''\n\n\nif __name__ == '__main__':\n server(8888)\n","sub_path":"oldboy/day09-进程-线程-协程/协程Coroutine/协程gevent-socketserver.py","file_name":"协程gevent-socketserver.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"12875561","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport collections\r\nimport matplotlib.pylab as plt\r\nimport cv2\r\nfrom skimage import transform,data\r\n\r\nclass generator():\r\n def __init__(self):\r\n self.EPS = 1e-12\r\n self.CROP_SIZE = 256\r\n self.Model = collections.namedtuple(\"Model\",\"my_inputs, outputs\")\r\n self.action_space = np.linspace(80, 1520, 19, dtype=np.int)\r\n self.n_actions = len(self.action_space)\r\n self.lines = 100\r\n self.currentline = 0\r\n self.interval = 5\r\n self.last_interval = 5\r\n def discrim_conv(self,batch_input, out_channels, stride):\r\n padded_input = tf.pad(batch_input, [[0, 0], [1, 1], [1, 1], [0, 0]], mode=\"CONSTANT\")\r\n return tf.layers.conv2d(padded_input, out_channels, kernel_size=4, strides=(stride, stride), padding=\"valid\", 
kernel_initializer=tf.random_normal_initializer(0, 0.02))\r\n def lrelu(self,x, a):\r\n with tf.name_scope(\"lrelu\"):\r\n x = tf.identity(x)\r\n return (0.5 * (1 + a)) * x + (0.5 * (1 - a)) * tf.abs(x)\r\n def gen_conv(self,batch_input, out_channels):\r\n # [batch, in_height, in_width, in_channels] => [batch, out_height, out_width, out_channels]\r\n initializer = tf.random_normal_initializer(0, 0.02)\r\n return tf.layers.conv2d(batch_input, out_channels, kernel_size=4, strides=(2, 2), padding=\"same\", kernel_initializer=initializer)\r\n def gen_deconv(self,batch_input, out_channels):\r\n # [batch, in_height, in_width, in_channels] => [batch, out_height, out_width, out_channels]\r\n initializer = tf.random_normal_initializer(0, 0.02)\r\n return tf.layers.conv2d_transpose(batch_input, out_channels, kernel_size=4, strides=(2, 2), padding=\"same\", kernel_initializer=initializer)\r\n def preprocess(self,image):\r\n with tf.name_scope(\"preprocess\"):\r\n # [0, 1] => [-1, 1]\r\n return image * 2 - 1\r\n def deprocess(self,image):\r\n with tf.name_scope(\"deprocess\"):\r\n # [-1, 1] => [0, 1]\r\n return (image + 1) / 2\r\n def batchnorm(self,inputs):\r\n return tf.layers.batch_normalization(inputs, axis=3, epsilon=1e-5, momentum=0.1, training=True, gamma_initializer=tf.random_normal_initializer(1.0, 0.02))\r\n def create_generator(self,generator_inputs, generator_outputs_channels):\r\n layers = []\r\n\r\n # encoder_1: [batch, 256, 256, in_channels] => [batch, 128, 128, ngf]\r\n with tf.variable_scope(\"encoder_1\"):\r\n output = self.gen_conv(generator_inputs, 64)\r\n layers.append(output)\r\n\r\n layer_specs = [\r\n 64 * 2, # encoder_2: [batch, 128, 128, ngf] => [batch, 64, 64, ngf * 2]\r\n 64 * 4, # encoder_3: [batch, 64, 64, ngf * 2] => [batch, 32, 32, ngf * 4]\r\n 64 * 8, # encoder_4: [batch, 32, 32, ngf * 4] => [batch, 16, 16, ngf * 8]\r\n 64 * 8, # encoder_5: [batch, 16, 16, ngf * 8] => [batch, 8, 8, ngf * 8]\r\n 64 * 8, # encoder_6: [batch, 8, 8, ngf * 8] => [batch, 4, 4, ngf * 8]\r\n 64 * 8, # encoder_7: [batch, 4, 4, ngf * 8] => [batch, 2, 2, ngf * 8]\r\n 64 * 8, # encoder_8: [batch, 2, 2, ngf * 8] => [batch, 1, 1, ngf * 8]\r\n ]\r\n\r\n for out_channels in layer_specs:\r\n with tf.variable_scope(\"encoder_%d\" % (len(layers) + 1)):\r\n rectified = self.lrelu(layers[-1], 0.2)\r\n # [batch, in_height, in_width, in_channels] => [batch, in_height/2, in_width/2, out_channels]\r\n convolved = self.gen_conv(rectified, out_channels)\r\n output = self.batchnorm(convolved)\r\n layers.append(output)\r\n\r\n layer_specs = [\r\n (64 * 8, 0.5), # decoder_8: [batch, 1, 1, ngf * 8] => [batch, 2, 2, ngf * 8 * 2]\r\n (64 * 8, 0.5), # decoder_7: [batch, 2, 2, ngf * 8 * 2] => [batch, 4, 4, ngf * 8 * 2]\r\n (64 * 8, 0.5), # decoder_6: [batch, 4, 4, ngf * 8 * 2] => [batch, 8, 8, ngf * 8 * 2]\r\n (64 * 8, 0.0), # decoder_5: [batch, 8, 8, ngf * 8 * 2] => [batch, 16, 16, ngf * 8 * 2]\r\n (64 * 4, 0.0), # decoder_4: [batch, 16, 16, ngf * 8 * 2] => [batch, 32, 32, ngf * 4 * 2]\r\n (64 * 2, 0.0), # decoder_3: [batch, 32, 32, ngf * 4 * 2] => [batch, 64, 64, ngf * 2 * 2]\r\n (64 , 0.0), # decoder_2: [batch, 64, 64, ngf * 2 * 2] => [batch, 128, 128, ngf * 2]\r\n ]\r\n\r\n num_encoder_layers = len(layers)\r\n for decoder_layer, (out_channels, dropout) in enumerate(layer_specs):\r\n skip_layer = num_encoder_layers - decoder_layer - 1\r\n with tf.variable_scope(\"decoder_%d\" % (skip_layer + 1)):\r\n if decoder_layer == 0:\r\n # first decoder layer doesn't have skip connections\r\n # since it is directly connected to 
the skip_layer\r\n input = layers[-1]\r\n else:\r\n input = tf.concat([layers[-1], layers[skip_layer]], axis=3)\r\n\r\n rectified = tf.nn.relu(input)\r\n # [batch, in_height, in_width, in_channels] => [batch, in_height*2, in_width*2, out_channels]\r\n output = self.gen_deconv(rectified, out_channels)\r\n output = self.batchnorm(output)\r\n\r\n if dropout > 0.0:\r\n output = tf.nn.dropout(output, keep_prob=1 - dropout)\r\n\r\n layers.append(output)\r\n\r\n # decoder_1: [batch, 128, 128, ngf * 2] => [batch, 256, 256, generator_outputs_channels]\r\n with tf.variable_scope(\"decoder_1\"):\r\n input = tf.concat([layers[-1], layers[0]], axis=3)\r\n rectified = tf.nn.relu(input)\r\n output = self.gen_deconv(rectified, generator_outputs_channels)\r\n output = tf.tanh(output)\r\n layers.append(output)\r\n\r\n return layers[-1]\r\n def create_model(self,):\r\n my_inputs = tf.placeholder(dtype=np.uint8,shape=(100,1601))\r\n\r\n my_img = my_inputs[:, :, tf.newaxis]\r\n my_img = tf.image.decode_jpeg(tf.image.encode_jpeg(my_img))\r\n my_img = tf.image.convert_image_dtype(my_img, dtype=tf.float32)\r\n my_img = tf.identity(my_img)\r\n my_img.set_shape([None, None, 1])\r\n\r\n inputs = self.preprocess(my_img[:, :, :])\r\n inputs = tf.image.resize_images(inputs, [256, 256], method=tf.image.ResizeMethod.AREA)\r\n inputs = inputs[tf.newaxis,:,:,:]\r\n\r\n with tf.variable_scope(\"generator\"):\r\n outputs = self.create_generator(inputs, 1)\r\n outputs = self.deprocess(outputs)\r\n outputs = self.convert(outputs)\r\n\r\n return self.Model(\r\n my_inputs = my_inputs,\r\n outputs=outputs,\r\n )\r\n def convert(self,image):\r\n size = [self.CROP_SIZE, int(round(self.CROP_SIZE * 1))]\r\n image = tf.image.resize_images(image, size=size, method=tf.image.ResizeMethod.BICUBIC)\r\n return tf.image.convert_image_dtype(image, dtype=tf.float32, saturate=True)\r\n def create_sweep(self,):\r\n sweep = np.zeros(shape=(100,1601))\r\n self.row=0\r\n for i in range(self.lines):\r\n if i - self.last_interval == 0:\r\n #self.interval = np.random.randint(2,11)\r\n self.last_interval += self.interval\r\n self.row += 1\r\n randint = np.random.randint(0,40)\r\n sweep[i,self.action_space[self.row%self.n_actions]-10-randint:self.action_space[self.row%self.n_actions]+11+randint]=1\r\n sweep[:, 84:109] += 2\r\n sweep[:, 152:176] += 2\r\n sweep[:, 279:301] += 2\r\n sweep[:, 390:411] += 2\r\n sweep[:, 615:640] += 2\r\n sweep[:, 944:954] += 2\r\n sweep[:, 960:992] += 2\r\n sweep[:, 1000:1009] += 2\r\n sweep[:, 1045:1066] += 2\r\n sweep[:, 1157:1180] += 2\r\n l=np.array(sweep[0:100,:],)\r\n l = l * 255 / 3\r\n\r\n #cv2.imwrite(\"label.jpg\", img)\r\n #plt.imshow(img, interpolation='nearest', aspect='auto')\r\n #plt.show()\r\n\r\n return l\r\n\r\n def generate_img(self,input,):\r\n tf.reset_default_graph()\r\n model = self.create_model()\r\n\r\n saver = tf.train.Saver()\r\n checkpoint = tf.train.latest_checkpoint(\"../cgan_weights\")\r\n with tf.Session() as sess:\r\n print(\"loading model from checkpoint\")\r\n saver.restore(sess, checkpoint)\r\n img = sess.run(model.outputs,feed_dict={model.my_inputs:input})\r\n img = np.reshape(img, (256, 256))\r\n img = transform.resize(img, (100, 1601),mode='constant')\r\n img = img * 255\r\n plt.imshow(img, interpolation='nearest', aspect='auto')\r\n plt.show()\r\n return img\r\n\r\n\"\"\"\r\nif __name__ == \"__main__\":\r\n gen = generator()\r\n img = gen.create_sweep()\r\n plt.imshow(img, interpolation='nearest', aspect='auto')\r\n plt.show()\r\n gen_img = 
gen.generate_img(img)\r\n\"\"\"","sub_path":"my_flask_api/model/gan_load_model.py","file_name":"gan_load_model.py","file_ext":"py","file_size_in_byte":8905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"484107906","text":"\"\"\"\nbyceps.announce.handlers.guest_server\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nAnnounce guest server events.\n\n:Copyright: 2014-2023 Jochen Kupperschmidt\n:License: Revised BSD (see `LICENSE` file for details)\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom flask_babel import gettext\n\nfrom byceps.announce.helpers import (\n    get_screen_name_or_fallback,\n    with_locale,\n)\nfrom byceps.events.guest_server import GuestServerRegisteredEvent\nfrom byceps.services.webhooks.models import Announcement, OutgoingWebhook\n\n\n@with_locale\ndef announce_guest_server_registered(\n    event_name: str, event: GuestServerRegisteredEvent, webhook: OutgoingWebhook\n) -> Announcement | None:\n    \"\"\"Announce that a guest server has been registered.\"\"\"\n    initiator_screen_name = get_screen_name_or_fallback(\n        event.initiator_screen_name\n    )\n    owner_screen_name = get_screen_name_or_fallback(event.owner_screen_name)\n\n    text = gettext(\n        '%(initiator_screen_name)s has registered a guest server '\n        'owned by \"%(owner_screen_name)s for party \"%(party_title)s\".',\n        initiator_screen_name=initiator_screen_name,\n        owner_screen_name=owner_screen_name,\n        party_title=event.party_title,\n    )\n\n    return Announcement(text)\n","sub_path":"byceps/announce/handlers/guest_server.py","file_name":"guest_server.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"506299646","text":"import cv2, math\nimport numpy as np\n\npaths = [\n    'input/clock.jpg',\n    'input/clocknoise.jpg',\n    'input/test1.png',\n    'input/test2.jpg',\n    'input/test3.jpg',\n    'input/test4.jpg',\n    'input/test5.jpg',\n    'input/test6.jpg',\n    'input/test7.jpg',\n    'input/test8.png',\n]\n\nTESTNUM = len(paths)\n# TESTNUM = 4\nK = 2\nWIDTH = 500\nHEIGHT = 500\nLINETRESH = 50\nMINLINELENGTH = 20\nMAXLINEGAP = 100\n\ndef preprocessImg(img):\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n    _, binaryImg = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n\n    struct = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n    iterations = 1\n    binaryImg = cv2.morphologyEx(binaryImg, cv2.MORPH_ERODE, struct, None, None, iterations, cv2.BORDER_REFLECT101)\n\n    cv2.imshow('binary', cv2.resize(binaryImg, (WIDTH, HEIGHT), cv2.INTER_AREA))\n    cv2.waitKey(0)\n    return binaryImg\n\ndef findContoursOnBinary(binaryImg, possibleContours):\n    contours, _ = cv2.findContours(binaryImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n    for cntrIdx in range(0, len(contours)):\n        # print(contours[cntrIdx].shape)\n        if (contours[cntrIdx].shape[0] > binaryImg.shape[0]):\n            possibleContours.append(contours[cntrIdx])\n\n    return possibleContours\n\ndef findPossibleContours(binaryImg):\n    possibleContours = findContoursOnBinary(binaryImg, [])\n\n    struct = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n    iterations = 1\n\n    while possibleContours == []:\n        binaryImg = cv2.morphologyEx(binaryImg, cv2.MORPH_DILATE, struct, None, None, iterations, cv2.BORDER_REFLECT101)\n        possibleContours = findContoursOnBinary(binaryImg, possibleContours)\n\n    return possibleContours\n\ndef createSegmentImg(img, possibleContours):\n    segment = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)\n    cv2.drawContours(segment, possibleContours, len(possibleContours) - 1, (255,0,0), -1, cv2.LINE_4)\n    cv2.drawContours(img, possibleContours, len(possibleContours) - 1, (255,0,0), -1, cv2.LINE_4)\n    cv2.imshow('img', cv2.resize(img, (WIDTH, HEIGHT), cv2.INTER_AREA))\n    cv2.imshow('segment', cv2.resize(segment, (WIDTH, HEIGHT), cv2.INTER_AREA))\n    cv2.waitKey(0)\n    return segment\n\ndef findAllLines(img, segment):\n    lines = cv2.HoughLinesP(segment, 1, np.pi / 180, LINETRESH, None, MINLINELENGTH, MAXLINEGAP)\n\n    X1 = []\n    X2 = []\n    Y1 = []\n    Y2 = []\n\n    for [currentLine] in lines:\n\n        x1 = currentLine[0]\n        y1 = currentLine[1]\n        x2 = currentLine[2]\n        y2 = currentLine[3]\n\n        X1.append(x1)\n        X2.append(x2)\n        Y1.append(y1)\n        Y2.append(y2)\n\n        cv2.line(img, (x1, y1), (x2, y2), (0,0,255), 2)\n    line = cv2.resize(img, (WIDTH, HEIGHT), interpolation = cv2.INTER_AREA)\n    cv2.imshow('Lines', line)\n    cv2.waitKey(0)\n    return X1, X2, Y1, Y2\n\ndef createStackedLines(X1, X2, Y1, Y2):\n    X1 = np.array(X1)\n    Y1 = np.array(Y1)\n    X2 = np.array(X2)\n    Y2 = np.array(Y2)\n\n    X1dash = X1.reshape(-1,1)\n    Y1dash = Y1.reshape(-1,1)\n    X2dash = X2.reshape(-1,1)\n    Y2dash = Y2.reshape(-1,1)\n\n    stacked = np.hstack((X1dash, Y1dash, X2dash, Y2dash))\n    floatP = np.float32(stacked)\n    return floatP\n\ndef createLinePointsWithKmeans(floatP, originalImg):\n    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\n    _, _, center = cv2.kmeans(floatP, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)\n    finalImg = originalImg.copy()\n\n    firstPts = []\n    secondPts = []\n\n    for p in range(len(center)):\n\n        x1 = int(center[p][0])\n        y1 = int(center[p][1])\n        x2 = int(center[p][2])\n        y2 = int(center[p][3])\n\n        pt1 = (x1, y1)\n        pt2 = (x2, y2)\n        \n        print(f'Line points: x = {pt1}, y = {pt2}')\n        firstPts.append(pt1)\n        secondPts.append(pt2)\n\n        cv2.line(finalImg, pt1, pt2, (0, 255, 0), 2)\n    resized = cv2.resize(finalImg, (WIDTH, HEIGHT), interpolation = cv2.INTER_AREA)\n    cv2.imshow('Final',resized)\n\n    cv2.waitKey(0)\n    return firstPts, secondPts, finalImg\n\ndef createVectorsAndEquations(firstPts, secondPts):\n    directionVectors = []\n    normalVectors = []\n    linesEq = []\n\n    for (pt1, pt2) in zip(firstPts, secondPts):\n        (x1, y1) = pt1\n        (x2, y2) = pt2\n\n        v1 = x2 - x1\n        v2 = y2 - y1\n        gcd = math.gcd(v1, v2)\n        v1 = int(v1 / gcd)\n        v2 = int(v2 / gcd)\n        n1 = -v2\n        n2 = v1\n        c = int(n1 * x1 + n2 * y1)\n\n        directionVectors.append([v1, v2])\n        normalVectors.append([n1, n2])\n        linesEq.append([n1, n2, c])\n    \n    return directionVectors, normalVectors, linesEq\n\ndef gaussElimination(linesEq):\n    mul = linesEq[0][0]/linesEq[1][0] if linesEq[1][0] != 0 else 1\n    for pts in linesEq[1:]:\n        temp = [[y*mul for y in pts]]\n        linesEq[1:] = temp\n\n    return linesEq\n\ndef findIntersectionPoints(linesEq, finalImg):\n    \n    # a = 0, a1 - a2 (linesEq[0][0] - linesEq[1][0]), b = b1 - b2 (linesEq[0][1] - linesEq[1][1]), c = c1 - c2 (linesEq[0][2] - linesEq[1][2])\n    # y = c / b\n    # x = (c1 - y) / a1\n    # i need only a1 to compute everything, so i call it a\n\n    a = linesEq[0][0]\n\n    b1 = linesEq[0][1]\n    b2 = linesEq[1][1]\n    b = b1 - b2\n\n    c1 = linesEq[0][2]\n    c2 = linesEq[1][2]\n    c = c1 - c2\n\n\n    intersectionY = int(c / b) if b != 0 else 0\n    intersectionX = int((c1 - (b1*intersectionY))/a) if a != 0 else 0\n    print(f'Intersection points: x = {intersectionX}, y = {intersectionY}')\n\n    cv2.circle(finalImg, (intersectionX, intersectionY), 3, (255, 0, 0), -1)\n    cv2.line(finalImg, (intersectionX, 0), (intersectionX, finalImg.shape[0]), (0, 255, 0), 1)\n    circle = cv2.resize(finalImg, (WIDTH, HEIGHT), cv2.INTER_AREA)\n    cv2.imshow('Circles', circle)\n\n    return intersectionX, intersectionY\n\ndef getFurthestPoints(firstPts, secondPts, intersectionX, intersectionY):\n    furtherPoints = []\n\n    for (pt1, pt2) in zip(firstPts, secondPts):\n        (x1, y1) = pt1\n        (x2, y2) = pt2\n\n        bigger = 'first' if abs(x1 - intersectionX) + abs(y1 - intersectionY) > abs(x2 - intersectionX) + abs(y2 - intersectionY) else 'second'\n        if bigger == 'first':\n            furtherPoints.append([x1, y1])\n        else:\n            furtherPoints.append([x2, y2])\n\n    furthestPoint = furtherPoints[0] if abs(furtherPoints[0][0] - intersectionX) + abs(furtherPoints[0][1] - intersectionY) > abs(furtherPoints[1][0] - intersectionX) + abs(furtherPoints[1][1] - intersectionY) else furtherPoints[1]\n    print(f'Further points: {furtherPoints}')\n    print(f'Furthest point: {furthestPoint}')\n\n    return furtherPoints, furthestPoint\n\ndef getAngles(directionVectors, furthestPoint, furtherPoints, intersectionX, intersectionY, img):\n    minutePoint = furthestPoint\n    hourPoint = furtherPoints[0] if furthestPoint == furtherPoints[1] else furtherPoints[1]\n    vectorMinute = directionVectors[0] if minutePoint == furtherPoints[0] else directionVectors[1]\n    vectorHour = directionVectors[0] if vectorMinute == directionVectors[1] else directionVectors[1]\n    intersectionVector = [0, img.shape[0]]\n\n    unitVectorMinute = vectorMinute / np.linalg.norm(vectorMinute)\n    unitVectorHour = vectorHour / np.linalg.norm(vectorHour)\n    unitVectorIntersection = intersectionVector / np.linalg.norm(intersectionVector)\n    dotProduct = np.dot(unitVectorIntersection, unitVectorMinute)\n\n\n    angle = math.radians(180) - np.arccos(dotProduct) if furtherPoints[0][0] > intersectionX else (math.radians(180) - np.arccos(dotProduct)) + math.radians(180)\n    minuteAngle = int(math.degrees(angle))\n    print(f'Minute hand angle rounded: {minuteAngle}')\n    dotProduct = np.dot(unitVectorHour, unitVectorIntersection)\n    angle = math.radians(180) - np.arccos(dotProduct) if furtherPoints[1][0] > intersectionX else (math.radians(180) - np.arccos(dotProduct)) + math.radians(180)\n    hourAngle = int(math.degrees(angle))\n    print(f'Hour hand angle rounded: {hourAngle}')\n\n    badUpperHalf = (minutePoint[1] > intersectionY and (minuteAngle < 90 or minuteAngle > 270)) or (hourPoint[1] > intersectionY and (hourAngle < 90 or hourAngle > 270))\n    badBottomHalf = (minutePoint[1] < intersectionY and (minuteAngle > 90 and minuteAngle < 270)) or (hourPoint[1] < intersectionY and (hourAngle > 90 and hourAngle < 270))\n\n    if badUpperHalf or badBottomHalf:\n        minuteAngle = (minuteAngle + 180) % 360\n\n    if badUpperHalf or badBottomHalf:\n        hourAngle = (hourAngle + 180) % 360\n\n    return minuteAngle, hourAngle\n\ndef calculateTime(minuteAngle, hourAngle):\n    minutePartition = 360/60\n    hourPartition = 360/12\n    hour = round(hourAngle/hourPartition)\n    minute = round(minuteAngle/minutePartition)\n    additionalZeroHour = '' if len(str(hour)) > 1 else '0'\n    additionalZeroMinute = '' if len(str(minute)) > 1 else '0'\n\n    print(f'The time is: {additionalZeroHour}{hour}:{additionalZeroMinute}{minute}')\n\ndef whatIsTheTime(imagePath):\n    img = cv2.imread(imagePath)\n    originalImg = img.copy()\n    cv2.imshow(imagePath, cv2.resize(originalImg, (WIDTH, HEIGHT), cv2.INTER_AREA))\n\n    binaryImg = preprocessImg(img)\n    possibleContours = findPossibleContours(binaryImg)\n    segment = createSegmentImg(img, possibleContours)\n    X1, X2, Y1, Y2 = findAllLines(img, segment)\n    floatP = createStackedLines(X1, X2, Y1, Y2)\n    firstPts, secondPts, finalImg = createLinePointsWithKmeans(floatP, originalImg)\n    directionVectors, normalVectors, linesEq = createVectorsAndEquations(firstPts, secondPts)\n\n    print(f'Direction vectors : {directionVectors}')\n    print(f'Normal vectors : {normalVectors}')\n    print(f'Equations: {linesEq}')\n    linesEq = gaussElimination(linesEq)\n\n    print(f'Equations after multiplying: {linesEq}')\n    intersectionX, intersectionY = findIntersectionPoints(linesEq, finalImg)\n    furtherPoints, furthestPoint = getFurthestPoints(firstPts, secondPts, intersectionX, intersectionY)\n    minuteAngle, hourAngle = getAngles(directionVectors, furthestPoint, furtherPoints, intersectionX, intersectionY, img)\n    calculateTime(minuteAngle, hourAngle)\n\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n\nfor i in range(TESTNUM):\n    whatIsTheTime(paths[i])\n","sub_path":"clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":9672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"600850030","text":"\nimport pandas as pd \nimport numpy as np\nimport pickle\n\nfrom gensim.models import Word2Vec\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\nclass ItemSequences:\n    '''\n    ItemSequences\n    '''\n    def __init__(self, df_column):\n        df_column = df_column.copy()\n        self.sequences = df_column.values.tolist()\n\n    def __iter__(self):\n        for sequence in self.sequences:\n            yield sequence.split(',')\n\n\nclass Item2Vec:\n    def __init__(self, df, column_name, window_size, embedding_size):\n        self.df = df[column_name]\n        self.window_size = window_size\n        self.embedding_size = embedding_size \n        self.embedding_matrix, self.items = self.get_embedding()\n\n    def get_embedding(self):\n        \"\"\"Generate word2vec style embeddings trained on sequences of events using Skip-gram loss\"\"\"\n        sentences = ItemSequences(self.df)\n        model = Word2Vec(sentences, window=self.window_size, min_count=5, sg=1, size=self.embedding_size)\n        items = list(map(int,model.wv.index2word))\n        return model.wv.vectors, items\n\n    def similarity(self, train_items=None):\n        \"\"\"Create similarity matrix and nearest neighbours for items\"\"\"\n        if train_items:\n            self.valid_items = list(set(train_items) & set(self.items))\n            self.check_valid = [True if i in self.valid_items else False for i in train_items]\n            valid_keys = [self.items.index(i) for i in self.valid_items]\n            valid_embedding_matrix = self.embedding_matrix[valid_keys]\n            self.reverse_lookup = {i:train_items.index(j) for i,j in enumerate(self.valid_items)}\n        else:\n            valid_embedding_matrix = self.embedding_matrix\n\n        self.S = cosine_similarity(valid_embedding_matrix)\n        self.top_k = np.argsort(-self.S,axis=1)[:,1:]\n","sub_path":"imf2vec/embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"549585325","text":"from math import ceil\n\ndef a(nuip: int, meses:list()) -> tuple:\n    v1 = meses[0]\n    v2 = meses[1]\n    v3 = meses[2]\n\n    return nuip, ceil(v1), ceil(v2), ceil(v3)\n\nprint(a(45345, [105, 35.2, 140.2]))","sub_path":"Ciclo I/Unidad 1 y 2/prueba.py","file_name":"prueba.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"648014511","text":"import requests\r\nimport json\r\nimport smtplib\r\nfrom sqlwrapper import gensql,dbget\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.multipart import MIMEMultipart\r\ndef sendemailani(name,email,message,conf_no,arrival,depature,room_type,id1,book_date):\r\n    print(name,email,type(email),message,conf_no,arrival,depature, room_type)\r\n    sender = \"infocuit.testing@gmail.com\"\r\n    ids = id1\r\n    for i in email:\r\n\r\n        receiver = i\r\n        #print(sender,type(sender),receiver,type(receiver))\r\n        subject = \"Hotel Booking\"\r\n        msg = MIMEMultipart()\r\n        msg['from'] = sender\r\n        msg['to'] = receiver\r\n        msg['subject'] = subject\r\n        print(ids)\r\n        hotel_det = json.loads(dbget(\"select * from ivr_hotel_list where id = \"+str(ids)+\"\"))\r\n        print(hotel_det)\r\n        html = \"\"\"\\\r\n        \r\n        \r\n        \r\n        \r\n        \r\n        \r\n        \r\n        \r\n          \"\"\"+hotel_det[0]['hotel_name']+\"\"\",\r\n          \"\"\"+hotel_det[0]['address']+\"\"\",\r\n          \"\"\"+hotel_det[0]['mobile_no']+\"\"\",\r\n          \"\"\"+hotel_det[0]['email']+\"\"\",\r\n          \"\"\"+book_date+\"\"\".\r\n          \r\n          Dear \"\"\"+name+\"\"\",\r\n                We are delighted that you have selected our \"\"\"+hotel_det[0]['hotel_name']+\"\"\" On behalf of the entire team at the \r\n       \"\"\"+hotel_det[0]['hotel_name']+\"\"\",extend you a very welcome and trust stay with us will be both enjoyable and comfortable\r\n       \"\"\"+hotel_det[0]['hotel_name']+\"\"\" offers a selection of business services and facilities.which are detailed in the booklet,\r\n       placed on the writing table in your room.Should you require any assistance or have any specific\r\n       requirements,please do not hesitate to contact me extension(999).\r\n           \r\n        \r\n          Confirmation Number: \"\"\"+conf_no+\"\"\"\r\n          Arrival Date: \"\"\"+arrival+\"\"\"\r\n          Depature Date: \"\"\"+depature+\"\"\"\r\n          Room Type: \"\"\"+room_type+\"\"\"\r\n\r\n          With best regards / Yours sincerely,\r\n          Hotel Manager\r\n        \r\n        \r\n        \r\n        \"\"\"\r\n\r\n        msg.attach(MIMEText(html,'html'))\r\n        \r\n        gmailuser = 'infocuit.testing@gmail.com'\r\n        password = 'infocuit@123'\r\n        server = smtplib.SMTP('smtp.gmail.com',587)\r\n        server.starttls()\r\n        server.login(gmailuser,password)\r\n        text = msg.as_string()\r\n        server.sendmail(sender,receiver,text)\r\n        print (\"the message has been sent successfully\")\r\n        server.quit()\r\n    return(json.dumps({'Return': 'Message Send Successfully',\"Return_Code\":\"MSS\",\"Status\": \"Success\",\"Status_Code\": \"200\"}, sort_keys=True, indent=4))\r\n\r\n\r\n\r\ndef callexternalapi(request):\r\n    phone = request.json['mobile']\r\n    d = {}\r\n    d['customer_mobile'] = phone\r\n    result = json.loads(gensql('select','ivr_room_customer_booked','*',d))\r\n    re = result[0]\r\n    print(re,type(re)) \r\n    name = re['customer_name']\r\n    email = ['r.ahamed@konnect247.com','i.sidhanee@konnect247.com','jazizahmed@gmail.com','infocuit.daisy@gmail.com']\r\n    #email = ['infocuit.daisy@gmail.com','infocuit.aravindh@gmail.com']\r\n    message = \"Booking Confirmed\"\r\n    conf_no = re['customer_confirmation_number']\r\n    #hotel_name = \"SMARTMO\"\r\n    arrival = re['customer_arrival_date']\r\n    depature = re['customer_depature_date']\r\n    room_type = re['customer_room_type']\r\n    id1 = re['id']\r\n    book_date = re['customer_booked_date']\r\n    return sendemailani(name,email,message,conf_no,arrival,depature,room_type,id1,book_date)\r\n","sub_path":"SendEmailANI.py","file_name":"SendEmailANI.py","file_ext":"py","file_size_in_byte":4182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"515344620","text":"'''\n\nDescription: As a young jedi you must learn to converse with Yoda. You have found a simple rule that helps change a \"normal\" sentence into \"Yoda talk\". \nTake the first two words in the sentence and place them at the end. Write a program that uses this rule to change a normal sentence into \"Yoda talk\".\n\nInput:\n\nInput consists of a string that you must change\n\ninto \"Yoda talk\". Assume that the maximum length of the string is 100.\n\nOutput:\n\nPrint the corresponding sentence in Yoda talk.\n\nSample Input:\n\nI will go now to find the Wookiee\n\nSample Output:\n\ngo now to find the Wookiee I will\n\n'''\n\ndef yoda(input1):\n    str = \" \"\n    temp = input1.split(\" \")\n    temp2 = temp[:2]\n    temp3 = temp[2:]\n    convert = temp3 + temp2\n\n    print(str.join(convert))\n\ninput1 = input(\"Enter the string: \")\nyoda(input1)","sub_path":"Problem_10.py","file_name":"Problem_10.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"499855238","text":"import uuid\nfrom pytest import fixture, raises\nimport os\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\nfrom applitools import logger\nfrom applitools.errors import DiffsFoundError\nfrom applitools.eyes import Eyes\nfrom applitools.logger import StdoutLogger\n\n# os.environ['HTTPS_PROXY'] = \"http://localhost:8888\"\n\n\n@fixture(scope=\"module\", autouse=True, name=\"eyes\")\ndef setup_eyes(request):\n    logger.set_logger(StdoutLogger())\n    eyes = Eyes()\n    # eyes.force_full_page_screenshot = True\n    # eyes.save_new_tests = False\n    eyes.hide_scrollbars = True\n\n    def fin():\n        eyes.abort_if_not_closed()\n\n    request.addfinalizer(fin)\n    return eyes\n\n\n@fixture(scope=\"module\", autouse=True, name=\"driver\")\ndef setup_driver(request):\n    chrome_options = Options()\n    chrome_options.add_argument('disable-infobars')\n    chrome_options.add_argument('headless')\n    driver = webdriver.Chrome(chrome_options=chrome_options)\n\n    def fin():\n        driver.quit()\n    request.addfinalizer(fin)\n    return driver\n\n\ndef test_session_summary_status_new(eyes, driver):\n    # First test\n    driver = eyes.open(driver, \"Python SDK\", \"TestResults-New_{}\".format(str(uuid.uuid4())), {'width': 800, 'height': 600})\n    driver.get('http://applitools.github.io/demo/TestPages/FramesTestPage/')\n    eyes.check_window(\"initial\")\n    eyes.close()\n\n\ndef test_summary_status_diffsfound(eyes, driver):\n    # Second test\n    driver = eyes.open(driver, \"Python SDK\", \"TestResults-DiffsFound\", {'width': 800, 'height': 600})\n    driver.get('http://applitools.github.io/demo/TestPages/FramesTestPage/')\n    eyes.check_window(\"initial\")\n    with raises(DiffsFoundError):\n        eyes.close()\n\n\ndef test_directly_set_viewport_size(eyes, driver):\n    required_viewport = {'width': 450, 'height': 300}\n    eyes.set_viewport_size(driver, required_viewport)\n    driver = eyes.open(driver, \"Python SDK\", \"TestViewPort-DirectlySetViewportt\")\n    assert required_viewport == eyes.get_viewport_size()\n    assert required_viewport == driver.get_viewport_size()\n\n\n","sub_path":"test/server_results_text.py","file_name":"server_results_text.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"589354518","text":"# Question 5:\n# (Longest Common Prefix):\n# Write a function to find the longest common prefix string amongst an array of strings.\n# If there is no common prefix, return an empty string \"\".\n# link: https://leetcode-cn.com/problems/longest-common-prefix/\n\nclass Solution:\n    def maxPre(self, array1):\n        \"\"\"\n        :param array1:\n        :return: string\n        \"\"\"\n        s1 = min(array1)\n        s2 = max(array1)\n        # print(s1)\n        for i, x in enumerate(s1):\n            if x != s2[i]:\n                return s2[:i]\n\n\nsol = Solution()\nprint(sol.maxPre([\"flower\", \"flow\", \"flight\"]))\n\n\n### s1 = min(array1) is correct here; s1 = min(array1, key=len) would not be","sub_path":"Easy/longCommonPre.py","file_name":"longCommonPre.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"356525492","text":"from deepdab.ai import *\nfrom deepdab import *\nimport tensorflow as tf\nimport numpy as np\n\n\nclass TDOneGradientPolicyMLPV2(Policy):\n    def __init__(self, board_size):\n        self._sess = tf.Session()\n        self._board_size = board_size\n\n        self._n_input = len(init_board_state(board_size))\n        self._n_hidden1 = 300\n        self._n_hidden2 = 300\n        self._n_hidden3 = 300\n        self._n_output = 1\n\n        self._input = tf.placeholder(\"float\", [1, self._n_input], name=\"input\")\n        self._target = tf.placeholder(\"float\", [1, self._n_output], name=\"target\")\n        self._error = tf.placeholder(\"float\", shape=[], name=\"error\")\n        self._lr = tf.placeholder(\"float\", shape=[], name=\"learning_rate\")\n        self._sum_grad_W1 = tf.placeholder(\"float\", shape=[self._n_input, self._n_hidden1], name=\"sum_grad_W1\")\n        self._sum_grad_W2 = tf.placeholder(\"float\", shape=[self._n_hidden1, self._n_hidden2], name=\"sum_grad_W2\")\n        self._sum_grad_W3 = tf.placeholder(\"float\", shape=[self._n_hidden2, self._n_hidden3], name=\"sum_grad_W3\")\n        self._sum_grad_W_out = tf.placeholder(\"float\", shape=[self._n_hidden3, self._n_output], name=\"sum_grad_W_out\")\n\n        self._W1 = tf.Variable(tf.random_uniform([self._n_input, self._n_hidden1], -1, 1), name=\"W1\")\n        self._W2 = tf.Variable(tf.random_uniform([self._n_hidden1, self._n_hidden2], -1, 1), name=\"W2\")\n        self._W3 = tf.Variable(tf.random_uniform([self._n_hidden2, self._n_hidden3], -1, 1), name=\"W3\")\n        self._W_out = tf.Variable(tf.random_normal([self._n_hidden3, self._n_output]), name=\"W_out\")\n\n        layer1 = tf.nn.tanh(tf.matmul(self._input, self._W1))\n        layer2 = tf.nn.tanh(tf.matmul(layer1, self._W2))\n        layer3 = tf.nn.tanh(tf.matmul(layer2, self._W3))\n\n        self._prediction = tf.nn.sigmoid(tf.matmul(layer3, self._W_out))\n\n        self._gradients = tf.gradients(self._prediction, [self._W1, self._W2, self._W3, self._W_out])\n\n        self._update_W1 = self._W1.assign(self._W1 + self._lr * self._error * self._sum_grad_W1)\n        self._update_W2 = self._W2.assign(self._W2 + self._lr * self._error * self._sum_grad_W2)\n        self._update_W3 = self._W3.assign(self._W3 + self._lr * self._error * self._sum_grad_W3)\n        self._update_W_out = self._W_out.assign(self._W_out + self._lr * self._error * self._sum_grad_W_out)\n\n        self._sess.run(tf.global_variables_initializer())\n\n        self.reset_history_buffer()\n\n    def get_architecture(self):\n        return \"12-tanh(300)-tanh(300)-tanh(300)-sigmoid(1)\"\n\n    def reset_history_buffer(self):\n        self._prediction_buffer = []\n        self._prediction_gradient_buffer = []\n\n    def get_last_prediction(self):\n        if len(self._prediction_buffer) > 0:\n            return self._prediction_buffer[-1]\n\n    def get_last_prediction_gradient(self):\n        if len(self._prediction_gradient_buffer) > 0:\n            return self._prediction_gradient_buffer[-1]\n\n    def select_edge(self, board_state):\n        zero_indices = []\n        for i in range(len(board_state)):\n            if board_state[i] == 0:\n                zero_indices.append(i)\n        if random.random() < self._epsilon:\n            random_index = random.choice(zero_indices)\n            # store history\n            new_state = [x for x in board_state]\n            new_state[random_index] = 1\n            new_state = np.reshape(new_state, (1, len(new_state)))\n            new_state_value, gradients = self._sess.run([self._prediction, self._gradients],\n                                                        feed_dict={self._input: new_state})\n            self._prediction_buffer.append(new_state_value[0][0])\n            self._prediction_gradient_buffer.append(gradients)\n            return random_index\n        else:\n            best_value = 0.0\n            best_value_gradient = None\n            best_state_index = zero_indices[0]\n            for zero_index in zero_indices:\n                new_state = [x for x in board_state]\n                new_state[zero_index] = 1\n                new_state = np.reshape(new_state, (1, len(new_state)))\n                new_state_value, gradients = self._sess.run([self._prediction, self._gradients],\n                                                            feed_dict={self._input: new_state})\n                if new_state_value >= best_value:\n                    best_value = new_state_value\n                    best_value_gradient = gradients\n                    best_state_index = zero_index\n            # store history\n            self._prediction_buffer.append(best_value[0][0])\n            self._prediction_gradient_buffer.append(best_value_gradient)\n            return best_state_index\n\n    def get_epsilon(self):\n        return self._epsilon\n\n    def set_epsilon(self, eps):\n        self._epsilon = eps\n\n    def get_learning_rate(self):\n        return self._learning_rate\n\n    def set_learning_rate(self, lr):\n        self._learning_rate = lr\n\n    def update(self, prediction_history, prediction_gradient_history):\n        if len(prediction_history) > 1:\n            error = prediction_history[-1] - prediction_history[-2]\n            sum_grad_W1 = np.sum(prediction_gradient_history[:-1], axis=0)[0]\n            sum_grad_W2 = np.sum(prediction_gradient_history[:-1], axis=0)[1]\n            sum_grad_W3 = np.sum(prediction_gradient_history[:-1], axis=0)[2]\n            sum_grad_W_out = np.sum(prediction_gradient_history[:-1], axis=0)[3]\n            self._update_params(error, sum_grad_W1, sum_grad_W2, sum_grad_W3, sum_grad_W_out)\n\n    def update_terminal(self, prediction_history, prediction_gradient_history, target):\n        error = target - prediction_history[-1]\n        sum_grad_W1 = np.sum(prediction_gradient_history, axis=0)[0]\n        sum_grad_W2 = np.sum(prediction_gradient_history, axis=0)[1]\n        sum_grad_W3 = np.sum(prediction_gradient_history, axis=0)[2]\n        sum_grad_W_out = np.sum(prediction_gradient_history, axis=0)[3]\n        self._update_params(error, sum_grad_W1, sum_grad_W2, sum_grad_W3, sum_grad_W_out)\n\n    def update_offline(self, prediction_history, prediction_gradient_history, target):\n        if len(prediction_history) > 0:\n            for i in range(1, len(prediction_history) + 1):\n                prev = prediction_history[i - 1]\n                last = prediction_history[i] if i < len(prediction_history) else target\n                error = last - prev\n                sum_grad_W1 = np.sum(prediction_gradient_history[:i], axis=0)[0]\n                sum_grad_W2 = np.sum(prediction_gradient_history[:i], axis=0)[1]\n                sum_grad_W3 = np.sum(prediction_gradient_history[:i], axis=0)[2]\n                sum_grad_W_out = np.sum(prediction_gradient_history[:i], axis=0)[3]\n                self._update_params(error, sum_grad_W1, sum_grad_W2, sum_grad_W3, sum_grad_W_out)\n\n    def _update_params(self, error, sum_grad_W1, sum_grad_W2, sum_grad_W3, sum_grad_W_out):\n        self._sess.run([self._update_W1, self._update_W2, self._update_W3, self._update_W_out],\n                       feed_dict={self._lr: self._learning_rate, self._error: error,\n                                  self._sum_grad_W1: sum_grad_W1,\n                                  self._sum_grad_W2: sum_grad_W2,\n                                  self._sum_grad_W3: sum_grad_W3,\n                                  self._sum_grad_W_out: sum_grad_W_out})\n\n    def print_params(self, f):\n        params = self._sess.run([self._W1])\n        f.write(\"W1: %s\\n\" % params[0].tolist())\n        params = self._sess.run([self._W2])\n        f.write(\"W2: %s\\n\" % params[0].tolist())\n        params = self._sess.run([self._W3])\n        f.write(\"W3: %s\\n\" % params[0].tolist())\n        params = self._sess.run([self._W_out])\n        f.write(\"W_out: %s\\n\" % params[0].tolist())\n\n    def print_gradients(self):\n        print(self._prediction_gradient_buffer)\n","sub_path":"deepdab/ai/td1_gradient_policy_mlp_v2.py","file_name":"td1_gradient_policy_mlp_v2.py","file_ext":"py","file_size_in_byte":7862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"407593682","text":"# -*- coding: utf-8 -*-\nimport StringIO\nimport math as m\nimport cv2\nimport numpy as np\nimport angus\n\ndef main(stream_index):\n    camera = cv2.VideoCapture(0)\n    camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640);\n    camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480);\n    camera.set(cv2.cv.CV_CAP_PROP_FPS, 10)\n\n    if not camera.isOpened():\n        print(\"Cannot open stream of index {}\".format(stream_index))\n        exit(1)\n\n    print(\"Input stream is of resolution: {} x {}\".format(camera.get(3), camera.get(4)))\n\n    conn = angus.connect()\n    service = conn.services.get_service('gaze_analysis', 1)\n    service.enable_session()\n\n    while camera.isOpened():\n        ret, frame = camera.read()\n        if not ret:\n            break\n\n        ### angus.ai computer vision services require gray images right now.\n        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n        ret, buff = cv2.imencode(\".jpg\", gray, [cv2.IMWRITE_JPEG_QUALITY, 80])\n        buff = StringIO.StringIO(np.array(buff).tostring())\n\n        job = service.process({\"image\": buff})\n        res = job.result\n\n        for face in res['faces']:\n            x, y, dx, dy = map(int, face['roi'])\n            cv2.rectangle(frame, (x, y), (x+dx, y+dy), (0,255,0))\n\n            psi = face['head_roll']\n            theta = - face['head_yaw']\n            phi = face['head_pitch']\n\n            cx = int(x + 0.5*dx)\n            cy = int(y + 0.5*dy)\n\n            length = 150\n            ### See here for details : https://en.wikipedia.org/wiki/Rotation_formalisms_in_three_dimensions\n            xvec = int(length*(m.sin(phi)*m.sin(psi) - m.cos(phi)*m.sin(theta)*m.cos(psi)))\n            yvec = int(- length*(m.sin(phi)*m.cos(psi) - m.cos(phi)*m.sin(theta)*m.sin(psi)))\n\n            cv2.line(frame, (cx, cy), (cx+xvec, cy+yvec), (255, 0, 0), 3)\n\n\n        cv2.imshow('original', frame)\n\n        if cv2.waitKey(1) & 0xFF == ord('q'):\n            break\n\n    ### Disabling session on the server\n    service.disable_session()\n\n    camera.release()\n    cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n    ### Web cam index might be different from 0 on your setup.\n    ### To grab a given video file instead of the host computer cam, try:\n    ### main(\"/path/to/myvideo.avi\")\n    main(0)\n","sub_path":"services/gazeanalysis/gazeanalysis_fromwebcam.py","file_name":"gazeanalysis_fromwebcam.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
\"VISITED_GAIN\": 0.05,\n \"VISITED_MAX\": 1,\n \"SONAR_RANGE\": 5,\n \"SONAR_BAD_THRESHOLD\": 0.2,\n \"SONAR_GOOD_THRESHOLD\": 0.4,\n \"USE_EXPLOSION_MAP\": False\n}\n\nGAME_REWARDS = GAME_REWARDS_PROCESSED\n\nGAME_REWARD_FACTORS = GAME_REWARD_FACTORS_PROCESSED","sub_path":"agent_code/DQN_agent/_parameters.py","file_name":"_parameters.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"475854325","text":"\"\"\" Volume of histogram:\n Calculate the total amount of water that a\n histogram graph can hold if poured water on top\n\n |\n | |..........\n | ...............| | \n | | | |\n | | | | | | \n |____|____|_________|____|____|_________>\n\n Ex: [2,1,0,4,1,3,0,0]\n\n\"\"\"\n\n#histogram = [0, 0, 4, 0, 0, 6, 0, 0, 3, 0, 5, 0, 1, 0, 0, 0]\nhistogram = [0,4,6,0,5,2,0,0,3,0,0,0]\n\ndef calculate_vol(histo):\n # Boundries\n b_start = None\n b_end = None\n current_boundry = None\n \n boundries = []\n p = 0\n\n # Loop\n while p < len(histo):\n if histo[p] <= 0:\n p+=1\n continue\n max_pos = None\n max_boundry = None\n cont = False\n for i in range(p+1,len(histo)):\n if max_boundry == None:\n max_boundry = histo[i]\n if histo[i] >= max_boundry:\n max_boundry = histo[i]\n max_pos = i\n if histo[p] < histo[i]:\n boundries.append((histo[p],histo[i]))\n p=i\n cont = True\n break\n if cont:\n continue\n boundries.append((histo[p],histo[max_pos]))\n p = max_pos\n \n return boundries\n\n\nif __name__ == '__main__':\n calculate_vol(histogram)\n","sub_path":"recap/ex6.py","file_name":"ex6.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"108876278","text":"import math\nnum = 600851475143\nnumtest = 21002\nnum1 = 12\n\ndef primefactor(number):\n factors=[]\n while(number>1):\n factor = getFactors(number)\n factors.append(factor)\n number //= factor\n return factors\n\n\ndef getFactors(number):\n if number % 2 ==0:\n return 2\n for x in range(3, int(math.sqrt(number))+1, 2):\n if number % x == 0:\n return x\n return number\nprint(primefactor(num))\n","sub_path":"SeeTheSaenz/standardlibrary/euler1-10/euler3.py","file_name":"euler3.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"95566710","text":"import scrapy \nimport json\nimport os\nfrom datetime import datetime\n\nclass H2RSpider(scrapy.Spider):\n name = \"h2r\"\n\n start_urls = [\n 'https://hentai2read.com/hentai-list'\n ]\n\n # os.chdir('..')\n # root = os.path.abspath(os.curdir)\n # os.chdir('scrapers')\n # meta_num = len([x for x in os.listdir(root + '/data') if 'meta' in x])\n # file_path = f'{root}/data/meta_{meta_num}.json'\n\n # with open(file_path, 'w+') as f:\n # f.write(json.dumps({\n # \"meta\": {\n # \"date\": str(datetime.now())\n # }\n # })a)\n\n def parse(self, response):\n content = response.css('section.content') \n for manga in content.css('div.img-container'):\n basic_info = {\n 'title': manga.css('h2.rf-title::text').extract_first(),\n 'url': manga.css('div.img-overlay a::attr(href)').extract_first(),\n 'thumbnail': manga.css('img::attr(data-src)').extract_first()\n }\n yield scrapy.Request(basic_info['url'], callback=self.parse_manga_info, meta={'basic_info': basic_info})\n \n # Next pages\n current_page = response.css('ul.pagination li.active')\n next_page = current_page.xpath('following-sibling::li[1]')\n link = 
next_page.css('a::attr(href)').extract_first()\n # if '2' not in link:\n link = response.urljoin(link)\n yield scrapy.Request(link, callback=self.parse)\n\n def parse_manga_info(self, response):\n def parse_alt_titles(alt_titles_selector_list):\n titles = alt_titles_selector_list.extract_first().split(',')\n return titles\n\n def parse_rating(rating_selector_list):\n rating = rating_selector_list.extract_first()\n index = rating.index('/')\n with_index = rating.index('with')\n votes_index = rating.index('votes')\n\n num_votes = stoi(rating[with_index+5: votes_index-1])\n score = stoi(rating[index-1])\n return {\n 'score': score,\n 'votes': num_votes\n }\n\n # Parse string to int, remove all nonnumeric chars\n def stoi(value):\n output = ''.join(ch for ch in value if ch.isdigit())\n try:\n output = int(output)\n except:\n output = None\n return output\n\n def extract_value(key, selector_list, multiple_values=False):\n if multiple_values:\n output = []\n query = '//b[text()=\"' + key + '\"]//following-sibling::*'\n siblings = selector_list.xpath(query)\n for sibling in siblings:\n output.append(sibling.css('a::text').extract_first())\n else:\n query = '//b[text()=\"' + key + '\"]//following-sibling::a[1]//text()'\n output = selector_list.xpath(query)\n output = output.extract_first()\n return output\n\n info = response.css('div ul.list')\n alt_titles = info.css('li::text')\n rating = response.css('div.push-10-t small::text')\n \n output = {\n 'alt_titles': parse_alt_titles(alt_titles),\n 'parody': extract_value('Parody', info, True),\n 'ranking': stoi(extract_value('Ranking', info)),\n 'rating': parse_rating(rating),\n 'status': extract_value('Status', info),\n 'release_year': stoi(extract_value('Release Year', info)),\n 'views': stoi(extract_value('View', info)),\n 'author': extract_value('Author', info, True),\n 'artist': extract_value('Artist', info, True),\n 'category': extract_value('Category', info, True),\n 'content': extract_value('Content', info, True),\n 'character': extract_value('Character', info, True),\n 'language': extract_value('Language', info)\n }\n output = {**response.meta['basic_info'], **output}\n \n yield output","sub_path":"scrapers/scrapers/spiders/h2r_scraper.py","file_name":"h2r_scraper.py","file_ext":"py","file_size_in_byte":4024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"365580341","text":"import random\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\ndef findWaitingTime(pro,n,wt,btime,order,s):\r\n complete=0\r\n t=0\r\n minm=max(btime)+1\r\n shortest=0\r\n check=0\r\n while(complete!=n):\r\n for j in range(n):\r\n if((pro[j][1] <= t) and (btime[j] 0 :\n return matcheslist.group(0)\n return None\n\n\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n def match(self, data, **kwargs):\n \"\"\"\n \"\"\"\n raise Exception ('Not implemented yet')\n\n\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n def findall(self, data, **kwargs):\n \"\"\"\n \"\"\"\n method = 'findall'\n matcheslist = self.re_method (data, method=method, **kwargs)\n matches = []\n if len(matcheslist)>0 :\n for i in range(len(matcheslist)):\n #print ('Debug: len(data):',len(data),'; start:', start,'; end:',end)\n matches.append(matcheslist.group(i).group())\n return matches\n return None\n\n\n\n # 
\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n def finditer(self, data, **kwargs):\n \"\"\"\n \"\"\"\n\n method = 'finditer'\n matcheslist = self.re_method (data, method=method, **kwargs)\n\n # build the structure to return\n if len(matcheslist)>0 :\n return matcheslist \n return None \n\n\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n def annotate(self, annotation, data, group = [0], action = 'sub', iob = False, **kwargs):\n \"\"\"\n \"\"\"\n\n prefix = ''\n\n data_copy = list(data)\n if isinstance(annotation, dict):\n annotation = [annotation]\n\n iter = self.finditer(data, **kwargs) #reversed([finditer(pattern, data)]) \n if iter != None:\n size = 0\n for m in iter:\n for g in group:\n #print ('Debug: m={} g={} start={} end={}'.format(m, g, m.start(g), m.end(g)))\n if action == 'sub':\n # data_copy[m.start(g):m.end(g)] = annotation\n data_copy[m.start(g)+size:m.end(g)+size] = annotation\n size += len(annotation) - (m.end(g) - m.start(g)) \n\n elif action == 'update':\n if len(annotation) == 1:\n for k in annotation[0].keys():\n for r in range (m.start(g), m.end(g)):\n if iob and r == m.start(g): \n prefix = PREFIX_BEGIN\n elif iob: \n prefix = PREFIX_INSIDE\n data_copy[r][k] = prefix + annotation[0][k]\n else:\n for k in annotation[0].keys():\n for r in range (m.start(g), m.end(g)):\n if len(annotation) == (m.end(g) - m.start(g)): \n if iob and r == m.start(g): \n prefix = PREFIX_BEGIN\n elif iob: \n prefix = PREFIX_INSIDE\n data_copy[r][k] = prefix + annotation[0][k]\n else: # Verbosity not the same size\n data_copy = data \n break\n\n elif action == 'extend':\n if len(annotation) == 1:\n for k in annotation[0].keys():\n for r in range (m.start(g), m.end(g)):\n if k not in data_copy[r]:\n if iob and r == m.start(g): \n prefix = PREFIX_BEGIN\n elif iob: \n prefix = PREFIX_INSIDE\n data_copy[r][k] = prefix+annotation[0][k]\n else:\n for k in annotation[0].keys():\n for r in range (m.start(g), m.end(g)):\n if len(annotation) == (m.end(g) - m.start(g)):\n if k not in data_copy[r]:\n if iob and r == m.start(g): \n prefix = PREFIX_BEGIN\n elif iob: \n prefix = PREFIX_INSIDE\n data_copy[r][k] = prefix+annotation[0][k]\n else: # Verbosity not the same size\n data_copy = data \n break \n return data_copy\n\n\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" \n def sub (self, repl, data, group = [0], **kwargs):\n \"\"\"\n Return the data obtained by replacing the leftmost non-overlapping occurrences of \n pattern matches or group of matches in data by the replacement repl. 
\n \"\"\"\n return self.annotate (repl, data, group, action = 'sub', iob = False, **kwargs)\n\n\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" \n def subn (self, repl, data, **kwargs):\n \"\"\"\n Perform the same operation as sub(), but return a tuple (new_string, number_of_subs_made).\n \"\"\"\n raise Exception (\"Not implemented yet !\")\n\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" \n def update (self, repl, data, group = [0], iob = False, **kwargs):\n \"\"\"\n Return the data after updating (and extending) the features of a match or a group of a match \n with the features of a dict or a sequence of dicts (of the same size as the group/match). \n \"\"\"\n return self.annotate (repl, data, group = group, action = 'update', iob = iob, **kwargs)\n\n\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\" \n def extend (self, repl, data, group = [0], iob = False, **kwargs):\n \"\"\"\n Return the data after updating (and extending) the features of a match or a group of a match \n with the features of a dict or a sequence of dicts (of the same size as the group/match). \n \"\"\"\n return self.annotate (repl, data, group = group, action = extend, iob = iob, **kwargs)\n\n\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n def split(self, data, **kwargs):\n \"\"\"\n \"\"\"\n raise Exception ('Not implemented yet')\n\n\n # \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n def __repr__(self):\n return ''\n\n\n# \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\ndef parse_syntactic(pattern, lexicons, **kwargs):\n\n # Build the lexer \n l = Lexer(pattern=pattern, lexicons=lexicons)\n\n # Build the syntax parser\n y = SyntacticPatternParser(tokens=l.tokens, **kwargs)\n\n logging.info ('# ----------------------------------')\n logging.info ('# Pattern=\\t%s',pattern)\n logging.info ('# Lexicons=\\t%s',lexicons)\n logging.info ('Starting syntax analysis...')\n\n # we start the compilation to get an internal representation of patterns\n\n y.parser.parse(pattern, l.lexer, tracking=True)\n\n return l\n","sub_path":"pyrata/compiled_pattern_re.py","file_name":"compiled_pattern_re.py","file_ext":"py","file_size_in_byte":9065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"507687670","text":"from functools import partial\nimport sys\nimport warnings\nimport os\n\nimport dill\n\nif sys.version_info > (3, 0, 0):\n from functools import lru_cache\n\n\nSKLEARN_VERBS = (\n 'fit',\n 'fit_transform',\n 'predict',\n 'predict_log_proba',\n 'predict_proba',\n 'score',\n)\n\n\ntry:\n if os.environ.get('MLPIPE_WITHOUT_SKLEARN') is not None:\n raise ImportError\n else:\n import sklearn.base\n SKLEARN_AVAILABLE = True\n\nexcept ImportError:\n SKLEARN_AVAILABLE = False\n warnings.warn(\"optional 'sklearn' dependency not available\", ImportWarning)\n\n\nclass Pipe(object):\n \"\"\"Pipe class.\n\n An instance of this class can hold a number of Segments and in whole forms the pipeline.\n 
\"\"\"\n\n def __init__(self, cached=False, cache_maxsize=128):\n\n if cached and sys.version_info > (3, 0, 0):\n self.__eval = lru_cache(cache_maxsize)(self.__eval)\n self.__cache_info__ = self.__eval.cache_info\n elif cached:\n warnings.warn(\"Ignoring 'cached' on python versions < 3.0.0\")\n\n self.segments = []\n\n def __add__(self, segment):\n \"\"\"Internal function, will be called when adding a Segment to the Pipe by using the '+' operator.\n\n Args:\n segment: An instance of Segment, this can contain any type of callable. A callable non-Segment object\n will be automatically encapsulated by a Segment.\n\n Returns: the Pipe instance\n\n \"\"\"\n\n if not isinstance(segment, Segment):\n segment = Segment(segment)\n\n self.segments.append(segment)\n segment._set_pipe(self)\n return self\n\n def __repr__(self):\n repr_str = \"a pipe, consisting of:\\n\"\n\n for i, segment in enumerate(self.segments):\n repr_str += \"segment {}: {}\\n\".format(i, segment)\n\n return repr_str\n\n def __getattr__(self, name):\n if name.startswith('__') and name.endswith('__'):\n return super(Pipe, self).__getattr__(name)\n\n else:\n return partial(self.__eval, name)\n\n def __call__(self, *args, **kwargs):\n attr = kwargs.get('attr', '__call__')\n return self.__eval(attr, *args, **kwargs)\n\n def __assert_list(self, args):\n if type(args) not in (list, tuple,):\n # force iterability of args until last evaluation\n args = (args,)\n\n return args\n\n def __eval(self, attr, *args, **kwargs):\n\n if attr in SKLEARN_VERBS and SKLEARN_AVAILABLE:\n # dispatch to sklearn compatible pipe evaluator\n return self.__eval_sk_style(attr, *args)\n\n elif attr in SKLEARN_VERBS and not SKLEARN_AVAILABLE:\n warnings.warn('sklearn warning: \\'{}\\' seems to be an sklearn verb, but sklearn is not available,'\n 'continuing processing without sklearn logic'.format(attr), UserWarning)\n\n for i, segment in enumerate(self.segments):\n args = getattr(segment, attr)(*args)\n\n if i < len(self.segments) - 1:\n args = self.__assert_list(args)\n\n return args\n\n def __eval_sk_style(self, attr, *args, **kwargs):\n # todo\n # check if obj is one inheriting an sklearn class\n # if not process as regular func\n\n if attr == 'fit':\n\n X, y, var_args = args[0], args[1], args[2:]\n\n for i, segment in enumerate(self.segments):\n\n if self.__is_sklearn_obj(segment.obj):\n\n getattr(segment, 'fit')(X, y, *var_args)\n\n if i < len(self.segments) - 1:\n X = getattr(segment, 'transform')(X, *var_args)\n\n else:\n eval_val = getattr(segment, attr)(X, y, *var_args)\n X, y, var_args = eval_val[0], eval_val[1], eval_val[2:]\n\n return self\n\n elif attr in ('predict', 'predict_log_proba', 'score', 'predict_proba', ):\n\n X, var_args = args[0], args[1:]\n\n for i, segment in enumerate(self.segments):\n\n if self.__is_sklearn_obj(segment.obj):\n\n if i < len(self.segments) - 1:\n X = getattr(segment, 'transform')(X, *var_args)\n\n else:\n output = getattr(segment, attr)(X, *var_args)\n\n else:\n eval_val = getattr(segment, attr)(X, *var_args)\n X, var_args = eval_val[0], eval_val[1:]\n\n return output\n\n def __is_sklearn_obj(self, obj):\n return issubclass(obj.__class__, (sklearn.base.BaseEstimator,\n sklearn.base.TransformerMixin,\n sklearn.base.ClassifierMixin,))\n\n @classmethod\n def _load(cls, filename):\n with open(filename, 'rb') as f:\n return dill.load(f)\n\n def _save(self, filename):\n with open(filename, 'wb') as f:\n dill.dump(self, f)\n\n def _dump(self, *args, **kwargs):\n self._save(*args, **kwargs)\n\n def _dumps(self):\n return 
dill.dumps(self)\n\n\nclass Segment(object):\n \"\"\"The Segment class can hold any callable object.\n\n Args:\n obj: a callable object\n description: an optional description of the segment/object\n args: additional \\*args which should always be passed to the object when called\n kwargs: additional \\**kwargs which should always be passed to the object when called\n\n \"\"\"\n def __init__(self, obj, description='anonymous segment', **kwargs):\n self.obj = obj\n self.description = description\n self.pipe = None\n\n self.kwargs = kwargs\n\n def _set_pipe(self, pipe):\n self.pipe = pipe\n\n def __repr__(self):\n return self.description\n\n def __call__(self, *args, **kwargs):\n try:\n return partial(self.obj, **self.kwargs)(*args, **kwargs)\n except TypeError:\n raise TypeError(\"Segment '{}' object '{}' not callable\".format(self.description, self.obj))\n\n def __getattr__(self, name):\n\n if name.startswith('__') and name.endswith('__'):\n return super(Segment, self).__getattr__(name)\n\n if hasattr(self.obj, name):\n return partial(getattr(self.obj, name), **self.kwargs)\n else:\n # return main object if attrs isn't found (and assume it is callable)\n return partial(getattr(self.obj, '__call__'), **self.kwargs)\n\nif __name__ == '__main__':\n pass\n","sub_path":"src/mlpipe/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":6515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"600608933","text":"#Word Freq Reddit \r\n\r\nfrom collections import Counter\r\nimport re\r\n\r\nimport nltk\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nnltk.download('stopwords')\r\n\r\nstopwords = nltk.corpus.stopwords.words('english')\r\n\r\nblah = re.findall(r'\\w+', open('grad.txt', encoding='utf8').read().lower())\r\n\r\n\r\ndef remove_stopwords(txt):\r\n txt_clean = [word for word in txt if word not in stopwords]\r\n return txt_clean\r\n\r\nblah = remove_stopwords(blah)\r\n\r\n\r\ncount = Counter(blah).most_common(25)\r\ncount = count[2:]\r\n\r\n\r\nsubjects = [x[0] for x in count]\r\nnumbers = [x[1] for x in count]\r\n\r\nleft = subjects\r\nheight = numbers\r\n\r\nplt.xlabel('Subjects', size = 0.5)\r\nplt.xticks(size = 8)\r\nplt.yticks(size=10)\r\nplt.ylabel('Frequency',size = 10)\r\n\r\n\r\nplt.title('Most Frequent Words in r/Grad School Submissions from 10-5-19 to 11-7-19')\r\n\r\n\r\n\r\nplt.bar(left, height)\r\n\r\nplt.show()\r\n\r\n","sub_path":"gradschool.py","file_name":"gradschool.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"31091865","text":"from elasticsearch import Elasticsearch\nimport json\nimport numpy as np\nimport re\nimport time\n\nentities = set([]) # using a set so duplicates are not added\nes = Elasticsearch()\nknowledgebase = \"dbpedia_2015_10\"\n\n\n\n\ndef mlm(query):\n query_terms = query.split()\n for term in query_terms:\n mlm_partial_query(term)\n\n\ndef mlm_partial_query(query):\n query = {\n \"sort\": [\"_score\"],\n \"query\": {\n \"match\": {\n \"names\": text\n }\n }\n }\n","sub_path":"MLM.py","file_name":"MLM.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"200260726","text":"\"\"\"\nCustom finders that can be used together\nwith StaticFileStorage objects to find\nfiles that should be collected by collectstatic.\n\"\"\"\nfrom pathlib import Path\n\nimport sass\nfrom django.conf 
+{"seq_id":"200260726","text":"\"\"\"\nCustom finders that can be used together\nwith StaticFileStorage objects to find\nfiles that should be collected by collectstatic.\n\"\"\"\nfrom pathlib import Path\n\nimport sass\nfrom django.conf import settings\nfrom django.contrib.staticfiles.finders import BaseFinder\nfrom django.core.files.storage import FileSystemStorage\n\n\nclass SimpleBulmaFinder(BaseFinder):\n    \"\"\"\n    A custom Finder class to compile bulma to static files,\n    and then return paths to those static files so they may be collected\n    by the static collector.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Initialize the finder with user settings and paths.\"\"\"\n        # Try to get the Bulma settings. The user may not have created this dict.\n        try:\n            self.bulma_settings = settings.BULMA_SETTINGS\n        except AttributeError:\n            self.bulma_settings = {}\n\n        self.simple_bulma_path = Path(__file__).resolve().parent\n        self.extensions = self.bulma_settings.get(\"extensions\", \"_all\")\n        self.variables = self.bulma_settings.get(\"variables\", {})\n        self.storage = FileSystemStorage(self.simple_bulma_path)\n\n    def _get_bulma_css(self):\n        \"\"\"Compiles the bulma css file and returns its relative path.\"\"\"\n        # Start by unpacking the users custom variables\n        scss_string = \"\"\n        for var, value in self.variables.items():\n            scss_string += f\"${var}: {value};\\n\"\n\n        # Now load bulma\n        scss_string += f'@import \"{self.simple_bulma_path}/bulma.sass\";'\n\n        # Now load in the extensions that the user wants\n        if self.extensions == \"_all\":\n            scss_string += f'@import \"{self.simple_bulma_path}/sass/extensions/_all\";\\n'\n        elif isinstance(self.extensions, list):\n            for extension in self.extensions:\n                scss_string += f'@import \"{self.simple_bulma_path}/sass/extensions/_{extension}\";\\n'\n\n        # Store this as a css file\n        css_string = sass.compile(string=scss_string)\n        with open(f\"{self.simple_bulma_path}/css/bulma.css\", \"w\") as bulma_css:\n            bulma_css.write(css_string)\n\n        return \"css/bulma.css\"\n\n    def _get_bulma_js(self):\n        \"\"\"\n        Return a list of all the js files that are\n        needed for the users selected extensions.\n        \"\"\"\n        js_files = []\n        js_folder = self.simple_bulma_path / \"js\"\n\n        if self.extensions == \"_all\":\n            for filename in js_folder.iterdir():\n                js_files.append(f\"js/{filename.name}\")\n        else:\n            for filename in js_folder.iterdir():\n                extension_name = str(filename.stem)\n\n                if extension_name in self.extensions:\n                    js_files.append(f\"js/{filename.name}\")\n\n        return js_files\n\n    def find(self, path, all=False):\n        \"\"\"\n        Given a relative file path, find an absolute file path.\n\n        If the ``all`` parameter is False (default) return only the first found\n        file path; if True, return a list of all found files paths.\n        \"\"\"\n        absolute_path = str(self.simple_bulma_path / path)\n\n        if all:\n            return [absolute_path]\n        return absolute_path\n\n    def list(self, ignore_patterns):\n        \"\"\"\n        Return a two item iterable consisting of\n        the relative path and storage instance.\n        \"\"\"\n        files = [self._get_bulma_css()]\n        files.extend(self._get_bulma_js())\n\n        for path in files:\n            yield path, self.storage\n","sub_path":"django_simple_bulma/finders.py","file_name":"finders.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"128238537","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/7/19 0019 18:38\n# @Author : Langzi\n# @Blog : www.langzi.fun\n# @File : run.py\n# @Software: PyCharm\nimport sys\nsys.path.append('..')\nfrom app import create_app\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\napp = create_app()\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0',debug=app.config['DEBUG'],port=9827)\n","sub_path":"Learn_Flas/flask_study/lab-3/app/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"180378635","text":"from __future__ import unicode_literals\n\nfrom .zz import TestDerive\nfrom ....lib import SysMock\n\n_mock = SysMock()\n\nclass T(TestDerive):\n    def setUp(self):\n        with _mock.path()(r'a.c').open(r'wb'):\n            pass\n    @staticmethod\n    def clear():\n        unlink = _mock.unlink()\n        for filename in r'a.c a.o'.split():\n            unlink(filename)\n    def test(self):\n        self.derive_print(_feed, _expected, sys=_mock)\n        self.clear()\n\n_feed = '''\na.o\\t:a.c $(\n\\tcompile $<\n)\n'''[1:]\n\n_expected = r'''\na.o {\n    compile a.c\n}\n'''[1:]\n","sub_path":"apymake/zztest/t1eval/t13derive/derive00.py","file_name":"derive00.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"97963269","text":"import functools\nimport multiprocessing\n\nimport numpy as np\nfrom numba import jit\nfrom pandas import DataFrame\n\nfrom pypairs import helper\n\n\ndef filter_matrix(matrix, gene_names, sample_names, subset_genes, subset_samples, rm_zeros=True):\n    subset_genes_mask = helper.to_boolean(subset_genes, gene_names)\n\n    zero = 0\n    if rm_zeros:\n        zeros_mask = np.invert(np.all(matrix == 0, axis=1))\n        genes_mask = np.logical_and(subset_genes_mask, zeros_mask)\n        zero = len(zeros_mask) - sum(zeros_mask)\n    else:\n        genes_mask = subset_genes_mask\n\n    helper.print_info(\"O\", \"Filtered out {num} genes: {subset} subsetted, {notde} not expressed\".format(\n        num=len(genes_mask) - sum(genes_mask),\n        subset=len(subset_genes_mask) - sum(subset_genes_mask),\n        notde=zero\n    ), \"cyclone\")\n\n    subset_samples_mask = helper.to_boolean(subset_samples, sample_names)\n\n    helper.print_info(\"O\", \"Filtered out {subset} (subsetted) samples\".format(\n        subset=len(subset_samples_mask) - sum(subset_samples_mask)), \"cyclone\")\n\n    matrix = matrix[genes_mask, :]\n    matrix = matrix[:, subset_samples_mask]\n\n    gene_names_np = np.array(gene_names)[genes_mask]\n    sample_names_np = np.array(sample_names)[subset_samples_mask]\n\n    return matrix, gene_names_np, sample_names_np\n\n\n@jit(nopython=True)\ndef get_proportion(sample, min_pairs, pairs):\n    hits = 0\n    total = 0\n\n    for i in range(len(pairs)):\n        pair = pairs[i]\n\n        a = sample[pair[0]]\n        b = sample[pair[1]]\n\n        if a > b:\n            hits += 1\n        if a != b:\n            total += 1\n\n    if hits < min_pairs:\n        return None\n\n    if total == 0:\n        return 0\n    return hits / total\n\n\n@jit(nopython=True)\ndef get_sample_score(sample, iterations, min_iter, min_pairs, pairs):\n    cur_score = get_proportion(sample, min_pairs, pairs)\n\n    if cur_score is None:\n        return 0\n\n    below = 0\n    total = 0\n    idx = sample\n    for i in range(0, iterations):\n        np.random.shuffle(idx)\n        new_score = get_proportion(idx, min_pairs, pairs)\n        if new_score is not None:\n            if new_score < cur_score:\n                below += 1\n            total += 1\n\n    if total == 0:\n        return 0\n    if total >= min_iter:\n        return below / total\n\n\ndef get_phase_scores(matrix, cat, iterations, min_iter, min_pairs, pairs, used, processes):\n    if pairs.size == 0:\n        helper.print_info(\"E\", \"No marker pairs for category {}\".format(cat), \"cyclone\")\n        return [0.0 for _ in matrix.T]\n\n    if processes != 1:\n        get_sample_score_par = functools.partial(get_sample_score, iterations=iterations, min_iter=min_iter,\n                                                 min_pairs=min_pairs, pairs=pairs)\n\n        samples = [sample[used] for sample in matrix.T]\n\n        with multiprocessing.Pool(processes=processes) as pool:\n            phase_scores = pool.map(get_sample_score_par, samples)\n\n        return list(phase_scores)\n    else:\n        phase_scores = [get_sample_score(sample[used], iterations, min_iter, min_pairs, pairs) for sample in matrix.T]\n\n        return phase_scores\n\n\ndef filter_marker_pairs(marker_pairs, gene_names):\n    gene_name_to_idx = {g: i for i, g in enumerate(gene_names)}\n    removed = 0\n    removed2 = 0\n\n    marker_pairs_idx = {}\n\n    used_masks = {}\n    for cat, pairs in marker_pairs.items():\n        used_mask = np.zeros(len(gene_names), dtype='bool')\n        for pair in pairs:\n            try:\n                g1_idx = gene_name_to_idx[pair[0]]\n                g2_idx = gene_name_to_idx[pair[1]]\n\n                used_mask[g1_idx] = True\n                used_mask[g2_idx] = True\n            except KeyError:\n                removed += 1\n        used_masks[cat] = used_mask\n        used_idx = np.where(used_mask)[0].tolist()\n\n        new_idx = {u: i for i, u in enumerate(used_idx)}\n\n        new_pairs_idx = []\n        for pair in pairs:\n            try:\n                g1_idx = gene_name_to_idx[pair[0]]\n                g2_idx = gene_name_to_idx[pair[1]]\n\n                new_pairs_idx.append([new_idx[g1_idx], new_idx[g2_idx]])\n            except KeyError:\n                print(\"Genepair ({}, {}) not present in dataset\".format(pair[0], pair[1]))\n                removed2 += 1\n\n        marker_pairs_idx[cat] = np.array(new_pairs_idx)\n\n    helper.print_info(\"O\", \"Translated marker pairs, {} removed\".format(removed), \"cyclone\")\n    return marker_pairs_idx, used_masks\n\n\ndef cyclone(matrix, marker_pairs, gene_names, sample_names, subset_genes=None, subset_samples=None, iterations=1000,\n            min_iter=100, min_pairs=50, rm_zeros=True, processes=1):\n    helper.print_info(\"I\", \"Started and preprocessing matrix and marker pairs...\", \"cyclone\")\n    matrix, gene_names_filtered, sample_names_filtered = filter_matrix(matrix, gene_names=gene_names,\n                                                                       sample_names=sample_names,\n                                                                       subset_genes=subset_genes,\n                                                                       subset_samples=subset_samples, rm_zeros=rm_zeros)\n    marker_pairs, used = filter_marker_pairs(marker_pairs, gene_names_filtered)\n\n    helper.print_init_cyclone(matrix, marker_pairs, iterations, min_iter, min_pairs, processes)\n\n    if processes == 0:\n        processes = multiprocessing.cpu_count() - 1\n\n    scores = {cat: get_phase_scores(matrix, cat, iterations, min_iter, min_pairs, pairs, used[cat], processes) for\n              cat, pairs in marker_pairs.items()}\n\n    scores_df = DataFrame(scores, columns=marker_pairs.keys())\n    scores_df.index = sample_names_filtered\n    scores_df['Prediction'] = scores_df.idxmax(axis=1)\n\n    helper.print_info(\"O\", \"is done!\", \"cyclone\")\n\n    return scores_df\n","sub_path":"pypairs/cyclone.py","file_name":"cyclone.py","file_ext":"py","file_size_in_byte":5802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"190527942","text":"# Time Complexity O(logn)\n#Space Complexity O(1)\n#Executed on Leetcode\nclass Solution(object):\n    def findMin(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        l = 0\n        h =len(nums)-1\n        while l <= h:\n            mid = l + (h-l)/2\n            if ((mid == 0 or nums[mid-1] > nums[mid]) and (mid == len(nums)-1 or nums[mid+1]>nums[mid])):\n                return nums[mid]\n\n            if (nums[mid] < nums[h]):\n                h =mid -1\n            else:\n                l = mid + 1\n        return -1\n","sub_path":"findMinElementinRotatedArray.py","file_name":"findMinElementinRotatedArray.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"502821136","text":"import feedparser\nimport asyncio\nimport traceback\n\n\nasync def start(url, client, channel):\n    prev_id = feedparser.parse(url)['items'][0]['guid']\n    #prev_id = 'http://www.northernbinge.com/home/m/37298876/article/4146870'\n    while True:\n        await asyncio.sleep(10)\n        try:\n            new_article = feedparser.parse(url)['items'][0]\n            if new_article['id'] != prev_id:\n                prev_id = new_article['guid']\n                await client.send_message(channel, '@everyone' + ' **' + new_article['title'] + '**\\n' + new_article['link'])\n        except:\n            traceback.print_exc()\n            await asyncio.sleep(50)\n","sub_path":"feed.py","file_name":"feed.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"555673126","text":"# STOCKHOUSE\nimport pandas\nimport quandl\nimport time\nimport Adafruit_MCP4725\n\nauth = \"SNbqZypqf-CzSt51ZQkL\"\ndac1 = Adafruit_MCP4725.MCP4725(address=0x60, busnum=1)\ndac2 = Adafruit_MCP4725.MCP4725(address=0x61, busnum=1)\ndf1 = quandl.get(\"WIKI/AAPL\", authtoken=auth)\ndf2 = quandl.get(\"WIKI/AAPL\", authtoken=auth)\n\nstock1 = df1[\"Close\"]\nstock2 = df2[\"Close\"]\n\nscalar1 = 4096 / stock1.max()\nscalar2 = 4096 / stock2.max()\n\ndac1_data = stock1.as_matrix()\ndac2_data = stock2.as_matrix()\n\nif dac1_data.size >= dac2_data.size:\n\tdata_range = dac2_data.size\nelse:\n\tdata_range = dac1_data.size\n\ndef playStock():\n\tfor x in range(0, data_range):\n\t\tdac1.set_voltage(int(dac1_data[x]*scalar1))\n\t\tdac2.set_voltage(int(dac2_data[x]*scalar2))\n\t\tprint (dac1_data[x], dac2_data[x])\n\t\ttime.sleep(0.1)\n\nwhile True:\n\tplayStock()\n","sub_path":"stocksound.py","file_name":"stocksound.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"191032446","text":"import sqlite3\r\n\r\nconn = sqlite3.connect('dbs/utility_dbs/nodes.db')\r\nc = conn.cursor()\r\n\r\nnode_id = \"4146223419\"\r\nnode_ip = \"0.0.0.0\"\r\nnode_type = \"storage\"\r\n\r\nc.execute('INSERT INTO nodes (node_id, node_type, node_ip, last_connect) VALUES (\"' + node_id + '\", \"' + node_type + '\", \"' + node_ip + '\", \"last_connect\")')\r\nconn.commit()","sub_path":"gophernet/ins_node.py","file_name":"ins_node.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"400856247","text":"def dibujar(caracter,b, espacio):\n\n    for fila in range(b):\n        for columna in range(b):\n            if(columna <= fila and fila + columna >= b-1 and columna <= (b-1)/2 or \ncolumna+fila == (b-1) or fila <= (b-1)/2 and columna == fila):\n                print(espacio , caracter, end=\"\" )\n            else:\n                print(espacio, ' ', end=\"\")\n        \n        print()\n    \ndef main():\n    \n    dibujar('*',11, ' ' )\n    \n    \nmain()\n\n\n'''\n\ncolumna+fila == (b-1) or fila <= (b-1)/2 and columna == fila\n\nfila <= (b-1)/2 and columna == fila \n'''","sub_path":"antena.py","file_name":"antena.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
\"taxType\": \"\",\n \"status\": \"\",\n \"language\": \"\",\n \"complianceStatus\": \"\",\n \"dob\": \"\",\n \"passport\": \"\",\n \"updatedAt\": \"\",\n \"createdAt\": \"\",\n \"gravatarUrl\": \"\",\n \"governmentId\": \"\",\n \"ssn\": \"\",\n \"primaryCurrency\": \"\",\n \"merchantId\": \"\",\n \"payoutMethod\": \"\",\n \"compliance\": \"\",\n \"accounts\": \"\",\n \"address\": \"\",\n }\n\n\n @staticmethod\n def find(recipient_id, term=\"\"):\n \"\"\"\n Retrieve a recipient\n A recipient_id is required::\n Recipient.find('R-fjeracjmuflh')\n \"\"\"\n config = Configuration(Configuration.public_key, Configuration.private_key, Configuration.enviroment)\n return Gateway(config).recipient.find(recipient_id, term)\n @staticmethod\n def create(body):\n \"\"\"\n Create a recipient\n A body is required::\n Recipient.create({\"type\": \"individual\", \"firstName\": \"John\",\n \"lastName\": \"Smith\", \"email\": \"jh@edxample.com\"})\n \"\"\"\n config = Configuration(Configuration.public_key, Configuration.private_key, Configuration.enviroment)\n return Gateway(config).recipient.create(body)\n\n @staticmethod\n def update(recipient_id, body):\n \"\"\"\n Update a recipient\n A recipient_id and body are required::\n Recipient.update({'firstName': 'tom'})\n \"\"\"\n config = Configuration(Configuration.public_key, Configuration.private_key, Configuration.enviroment)\n return Gateway(config).recipient.update(recipient_id, body)\n\n @staticmethod\n def delete(recipient_id):\n \"\"\"\n Delete a recipient\n A recipient_id is required::\n Recipient.delete('R-fjeracjmuflh')\n \"\"\"\n config = Configuration(Configuration.public_key, Configuration.private_key, Configuration.enviroment)\n return Gateway(config).recipient.delete(recipient_id)\n\n @staticmethod\n def search(page=1, page_number=10, term=\"\"):\n \"\"\"\n Query for a recipient\n Recipient.search(1,10,'test')\n \"\"\"\n config = Configuration(Configuration.public_key, Configuration.private_key, Configuration.enviroment)\n return Gateway(config).recipient.search(page, page_number, term)\n\n @staticmethod\n def _initialize(attributes):\n \"\"\"Initialize fields and return a dict of attributes.\"\"\"\n\n fields = [\n \"id\",\n \"routeType\",\n \"routeMinimum\",\n \"estimatedFees\",\n \"id\",\n \"referenceId\",\n \"email\",\n \"name\",\n \"lastName\",\n \"firstName\",\n \"type\",\n \"taxType\",\n \"status\",\n \"language\",\n \"complianceStatus\",\n \"dob\",\n \"passport\",\n \"updatedAt\",\n \"createdAt\",\n \"gravatarUrl\",\n \"governmentId\",\n \"ssn\",\n \"primaryCurrency\",\n \"merchantId\",\n \"payoutMethod\",\n \"compliance\", # TODO: Factory\n \"accounts\",\n \"address\", #TODO: Factory\n ]\n\n for field in fields:\n if attributes.get('recipient') is None:\n Recipient._attributes[field] = attributes.get(field)\n elif attributes['recipient'].get(field) is not None:\n Recipient._attributes[field] = attributes['recipient'][field]\n\n return Recipient._attributes\n\n @staticmethod\n def factory(attributes):\n \"\"\"Creates an instance of Recipient and returns it. 
\"\"\"\n instance = Recipient._initialize(attributes)\n return instance\n","sub_path":"paymentrails/recipient.py","file_name":"recipient.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"377229827","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom src.common.column_name import container_usage_column_name\n\n# Probability distribution (PDF) of online-service containers' mean CPU usage over 12h\nif __name__ == '__main__':\n\n path = '..\dataset\container_usage.csv'\n\n try:\n f = open(path, 'r', encoding='utf-8')\n data = pd.read_csv(f)\n df = pd.DataFrame(data)\n df.columns = container_usage_column_name\n grp = df[['timestamp', 'instance_id', 'cpu']].groupby('instance_id')\n mean_grp = grp.mean()\n\n mean_cpu_usage_by_container = mean_grp['cpu']\n\n fig, ax = plt.subplots(figsize=(5, 4))\n num_bins = 100\n\n ax.hist(mean_cpu_usage_by_container, num_bins, normed=1, # histtype='step',\n cumulative=False)\n\n ax.grid(True)\n # ax.legend(loc='right')\n ax.set_title('PDF of container\\'s mean CPU usage')\n ax.set_xlabel('mean CPU usage during 12h of a container (%)')\n ax.set_ylabel('portion of containers')\n ax.set_xlim([-5, 100])\n\n plt.show()\n\n except FileNotFoundError:\n print('CSV file not found!')\n finally:\n print('finish')\n","sub_path":"src/container_mean_cpu_usage_pdf.py","file_name":"container_mean_cpu_usage_pdf.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"277768580","text":"import json\r\nfrom sklearn.manifold import MDS\r\n\r\ndef loadDataFromFile(path):\r\n with open(path, mode='r', encoding='utf8') as file:\r\n stream = file.read()\r\n data = json.loads(stream)\r\n return data\r\n\r\ndef normalize(matrix):\r\n normalizedMatrix = []\r\n for line in matrix:\r\n newLine = []\r\n for _ in line:\r\n newLine.append(0)\r\n normalizedMatrix.append(newLine)\r\n for columnIndex in range(len(matrix[0])):\r\n minimum = 0\r\n maximum = 1 # prevent from ending up 0, occurring ZeroDivisionError\r\n for line in matrix:\r\n each = line[columnIndex]\r\n minimum = min(minimum,each)\r\n maximum = max(maximum,each)\r\n for lineIndex in range(len(matrix)):\r\n each = matrix[lineIndex][columnIndex]\r\n normalizedMatrix[lineIndex][columnIndex] = (each - minimum) / (maximum - minimum)\r\n return normalizedMatrix\r\n\r\ndef convertData(origin):\r\n data={}\r\n for year in range(1952,2014):\r\n temp={}\r\n for member in origin:\r\n if year==member['Sgnyea'] and member['Prvcnm']!='中国':\r\n values=list(member.values())\r\n values=values[3:]\r\n for i in range(0,20):\r\n if values[i]==\"\":\r\n del values[i]\r\n values.insert(i,0)\r\n if isinstance(values[i],str):\r\n del values[i]\r\n values.insert(i,0)\r\n temp[member['Prvcnm']]=values\r\n data[year]=temp\r\n return data\r\n\r\n \r\nif __name__ == '__main__':\r\n path = \"gdp.json\"\r\n origin = loadDataFromFile(path)\r\n data = convertData(origin)\r\n embedding = MDS(n_components=2)\r\n outputAll = {} # Use a dict to store the output data\r\n for year in data:\r\n labels = [] # Record the ids by order\r\n matrix = []\r\n for city in data[year]:\r\n labels.append(city)\r\n matrix.append(data[year][city])\r\n print(normalize(matrix))\r\n coordinates = embedding.fit_transform(normalize(matrix)) # Get 2-d matrix\r\n output = {}\r\n for i in range(len(labels)):\r\n output[labels[i]] = list(coordinates[i]) # IMPORTANT: output of MDS is not a common list object \\\r\n for 
city in output:\r\n for k in range(0,2):\r\n if output[city][k]<=-1:\r\n output[city][k]=-0.9\r\n if output[city][k]>=1:\r\n output[city][k]=0.9\r\n \r\n outputAll[year] = output\r\n print(outputAll)\r\n with open(\"MDS.json\", mode='w', encoding='utf8') as file:\r\n json.dump(outputAll,file,ensure_ascii=False)\r\n\r\n pass\r\n","sub_path":"2020年d3寒假实训/MDS.py","file_name":"MDS.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"416554091","text":"def answer(str_S):\n mi = None\n s = int(str_S)\n if s <= 2 :\n mi = 2 if s == 2 else 1\n pos_even = last_appearance(s, EVEN_POSITIONS)\n pos_odd = last_appearance(s, ODD_POSITIONS)\n pos = max(pos_even if pos_even else -1, pos_odd if pos_odd else -1)\n mi = pos if pos >= 0 else None\n return str(mi)\n\nEVEN_POSITIONS = lambda x : 2 * x\nODD_POSITIONS = lambda x : 2 * x + 1\ndef last_appearance(s, pos_trans) :\n lpos, rpos = 0, s\n while lpos < rpos :\n ## narraw n, where f(n) == s, within [lpos, rpos)\n mid = (lpos + rpos) // 2\n pos = pos_trans(mid)\n v = f(pos)\n if v > s :\n rpos = mid\n elif v < s :\n lpos = mid + 1\n else :\n return pos\n return None\n\ndef f(nn) :\n if nn > 2 :\n n = nn // 2\n if nn % 2 == 0 :\n return f2(n) + n\n else :\n return f2(n-1) + 1\n elif nn == 2 :\n return 2\n else :\n return 1\n\ndef f2(nn) :\n if nn > 2 :\n n = nn // 2\n if nn % 2 == 0 :\n return 2*f2(n-1) + df2(n-1) + n + 1\n else :\n return 2*f2(n-1) + df2(n-1) + df2(n) + n + 2\n else :\n return f(nn) + f(nn+1)\n\ndef df2(nn) :\n if nn > 2 :\n n = nn // 2\n if nn % 2 == 0 :\n return df2(n) + 1\n else :\n return df2(n-1)\n else :\n return f(nn+2) - f(nn)\n","sub_path":"breeding_like_rabbits/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"177277998","text":"import glob\nimport requests\nimport pandas as pd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport cv2\nimport os\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nimport tensorflow as tf\nfrom tensorflow.contrib.layers import flatten\nimport sys\nsys.path.append('/home/krishna_warrior/Desktop/traffic_sign_final') #address for pipeline folder\nfrom pipeline import NeuralNetwork, make_adam, Session, build_pipeline #import from pipeline\nmatplotlib.style.use('ggplot')\n\nTRAIN_IMAGE_DIR='/home/krishna_warrior/Desktop/dataset'\ndfs=[]\nfor train_file in glob.glob(os.path.join(TRAIN_IMAGE_DIR,'*/GT-*.csv')):\n folder=train_file.split('/')[5] #actually my path contains 5 elements, configure according to your path\n df=pd.read_csv(train_file,sep=';')\n df['Filename']=df['Filename'].apply(lambda x: os.path.join(TRAIN_IMAGE_DIR,folder,x))\n dfs.append(df)\n\ntrain_df=pd.concat(dfs,ignore_index=True) #storing csv data in train_df\ntrain_df.head();\n#print(train_df['ClassId'])\n#print(dfs)\ndf=pd.DataFrame(train_df)\ndf.to_csv(\"my_data1.csv\",index=False)\n#y= train_df['ClassId'][~np.isnan(train_df['ClassId'])] #this is used to remove nan entries\ndf=pd.DataFrame(train_df['ClassId'])\ndf.to_csv(\"my_data.csv\",index=False)\n\nN_CLASSES=np.unique(train_df['ClassId']).size #no of classes\nprint(\"No. of training images: {:>5}\".format(train_df.shape[0]))\nprint(\"No. 
of classes: {:>5}\".format(N_CLASSES))\n\ndef class_dist(classIDs,title): #maping data distribution with help of matplotlib\n plt.figure(figsize=(15,5))\n plt.title('class id dist for {}'.format(title))\n plt.hist(classIDs,bins=N_CLASSES)\n plt.show()\n#class_dist(train_df['ClassId'],'Train data')\n\nsign_name_df=pd.read_csv('sign_names.csv',index_col='ClassId') #getting sign name corresponding to classid\nsign_name_df.head()\n#print(sign_name_df)\nsign_name_df['Occurence']=[sum(train_df['ClassId']==c) for c in range(N_CLASSES)] # add new colum 'Occurence' which stores no of images in a class.\nsign_name_df.sort_values('Occurence',ascending=False) #sort occurence in non ascending order\n#print(sign_name_df) #will show id,name,occurence of images.\nSIGN_NAMES=sign_name_df.SignName.values #store sign names value corresponding to id\n#print(SIGN_NAMES[2]) #will print speed limit 50 km/h\n\ndef load_image(image_file):\n return plt.imread(image_file)\ndef get_samples(image_data,num_samples,class_id=None): #prepare sample images(VALIDATION)\n if class_id is not None:\n image_data=image_data[image_data['ClassId']==class_id]\n indices=np.random.choice(image_data.shape[0],size=num_samples,replace=False) #randomly chosing images for validation\n return image_data.iloc[indices][['Filename','ClassId']].values\ndef show_images(image_data,cols=5,sign_names=None,show_shape=False,func=None): #displey all images in matrix form\n num_images=len(image_data)\n rows=num_images//cols\n plt.figure(figsize=(cols*3,rows*2.5))\n for i,(image_file,lable) in enumerate(image_data):\n image=load_image(image_file)\n if func is not None:\n image=func(image)\n plt.subplot(rows,cols,i+1)\n plt.imshow(image)\n if sign_names is not None:\n plt.text(0,0,'{}:{}'.format(lable,sign_names[lable]),color='k',backgroundcolor='c',fontsize=8)\n if show_shape:\n plt.text(0,image.shape[0],'{}'.format(image.shape),color='k',backgroundcolor='y',fontsize=8)\n plt.xticks([])\n plt.yticks([])\n plt.show()\nsample_data=get_samples(train_df,20)\n#show_images(sample_data,sign_names=SIGN_NAMES,show_shape=True)\n#show_images(get_samples(train_df,40,class_id=2),cols=20,show_shape=True)\n#-------------------------------------------------------------------------------------------------------------------\n# Train and validation\nX=train_df['Filename'].values\ny=train_df['ClassId'].values\nprint('X data',len(X))\nX_train, X_valid,y_train, y_valid=train_test_split(X,y,stratify=y,test_size=329,random_state=0) #sorry aashita :) i will use external validation images later\nprint('X_train:',len(X_train))\nprint('X_valid:',len(X_valid))\n#Model implimentation\nINPUT_SHAPE=(32,32,3)\n#pipeline\ndef train_evaluate(pipeline,epochs=10,samples_per_epoch=50000,train=(X_train,y_train),test=(X_valid, y_valid)): #here 50000 is used to increase accuracy(more iterations)\n X,y=train\n learning_curve=[]\n for i in range (epochs):\n indices=np.random.choice(len(X),size=samples_per_epoch) #did u get the use of np.random() ?\n pipeline.fit(X[indices],y[indices])\n scores=[pipeline.score(*train),pipeline.score(*test)]\n learning_curve.append([i,*scores])\n print(\"Epoch: {:>3} Training score: {:.3f} Evaluation score: {:.3f}\".format(i,*scores))\n return np.array(learning_curve).T\n#network1 performance:\ndef resize_image(image,shape=INPUT_SHAPE[:2]):\n return cv2.resize(image,shape)\nloader=lambda image_file: resize_image(load_image(image_file))\n'''with Session() as session:\n functions=[loader]\n pipeline=build_pipeline(functions,session, network1(),make_adam(1.0e-3))\n 
train_evaluate(pipeline)'''\n# image augmentation helpers\ndef rBrightness(image,ratio):\n hsv=cv2.cvtColor(image,cv2.COLOR_RGB2HSV)\n brightness=np.float64(hsv[:, :, 2])\n brightness=brightness*(1.0+np.random.uniform(-ratio,ratio))\n brightness[brightness>255]=255\n brightness[brightness<0]=0\n hsv[:, :, 2]=brightness\n return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)\ndef rRotation(image, angle):\n if angle==0:\n return image\n angle=np.random.uniform(-angle,angle)\n rows,cols=image.shape[:2]\n size=rows,cols\n center=rows/2,cols/2\n scale=1.0\n rotation=cv2.getRotationMatrix2D(center,angle,scale)\n return cv2.warpAffine(image,rotation,size)\ndef rTranslation(image, translation):\n if translation==0:\n return image\n rows,cols=image.shape[:2]\n size=rows,cols\n x=np.random.uniform(-translation,translation)\n y=np.random.uniform(-translation,translation)\n trans=np.float32([[1,0,x],[0,1,y]])\n return cv2.warpAffine(image,trans,size)\ndef rShear(image, shear):\n if shear==0:\n return image\n rows,cols=image.shape[:2]\n size=rows,cols\n left,right,top,bottom=shear,cols-shear,shear,rows-shear\n dx=np.random.uniform(-shear,shear)\n dy=np.random.uniform(-shear,shear)\n p1=np.float32([[left,top],[right,top],[left,bottom]])\n p2=np.float32([[left+dx,top],[right+dx,top+dy],[left,bottom+dy]])\n move=cv2.getAffineTransform(p1,p2)\n return cv2.warpAffine(image,move,size)\ndef augment_image(image,brightness,angle,translation,shear):\n image=rBrightness(image,brightness)\n image=rRotation(image,angle)\n image=rTranslation(image,translation)\n image=rShear(image,shear)\n return image\naugmenter=lambda x: augment_image(x,0.7,10,5,2)\nshow_images(sample_data[10:],cols=10)\nfor i in range(5):\n show_images(sample_data[10:],cols=10,func=augmenter)\n'''with Session() as session:\n functions=[loader,augmenter]\n pipeline=build_pipeline(functions,session, network1(),make_adam(1.0e-3))\n train_evaluate(pipeline)'''\nnormalizers=[('x-127.5', lambda x: x-127.5),('x/127.5-1.0', lambda x: x/127.5-1.0),\n ('x/255.0-0.5',lambda x: x/255.0-0.5),('x-x.mean()',lambda x: x-x.mean()),\n ('(x-x.mean())/x.std()', lambda x: (x-x.mean())/x.std())]\nnormalizer=lambda x: (x-x.mean())/x.std()\nconverters = [('Gray', lambda x: cv2.cvtColor(x, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis]),\n ('HSV', lambda x: cv2.cvtColor(x, cv2.COLOR_RGB2HSV)),\n ('HLS', lambda x: cv2.cvtColor(x, cv2.COLOR_RGB2HLS)),\n ('Lab', lambda x: cv2.cvtColor(x, cv2.COLOR_RGB2Lab)),\n ('Luv', lambda x: cv2.cvtColor(x, cv2.COLOR_RGB2Luv)),\n ('XYZ', lambda x: cv2.cvtColor(x, cv2.COLOR_RGB2XYZ)),\n ('Yrb', lambda x: cv2.cvtColor(x, cv2.COLOR_RGB2YCrCb)),\n ('YUV', lambda x: cv2.cvtColor(x, cv2.COLOR_RGB2YUV))]\nGRAY_INPUT_SHAPE = (*INPUT_SHAPE[:2], 1)\npreprocessors = [loader,augmenter, normalizer]\ndef show_learning_curve(learning_curve):\n epochs, train, valid = learning_curve\n plt.figure(figsize=(10, 10))\n plt.plot(epochs, train, label='train')\n plt.plot(epochs, valid, label='validation')\n plt.title('Learning Curve')\n plt.ylabel('accuracy')\n plt.xlabel('epochs')\n plt.xticks(epochs)\n plt.legend(loc='center right')\ndef plot_confusion_matrix(cm):\n cm = [row/sum(row) for row in cm]\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_subplot(111)\n cax = ax.matshow(cm, cmap=plt.cm.Oranges)\n fig.colorbar(cax)\n plt.title('Confusion Matrix')\n plt.xlabel('Predicted Class IDs')\n plt.ylabel('True Class IDs')\n plt.show()\ndef print_confusion_matrix(cm, sign_names=SIGN_NAMES):\n results = [(i, SIGN_NAMES[i], row[i]/sum(row)*100) for i, row in enumerate(cm)]\n accuracies = []\n for 
result in sorted(results, key=lambda x: -x[2]):\n print('{:>2} {:<50} {:6.2f}% {:>4}'.format(*result, sum(y_train==result[0])))\n accuracies.append(result[2])\n print('-'*50)\n print('Accuracy: Mean: {:.3f} Std: {:.3f}'.format(np.mean(accuracies), np.std(accuracies)))\ndef make_network3(input_shape=INPUT_SHAPE):\n return (NeuralNetwork()\n .input(input_shape)\n .conv([5, 5, 24]) # <== doubled\n .max_pool()\n .relu()\n .conv([5, 5, 64]) # <== doubled\n .max_pool()\n .relu()\n .flatten()\n .dense(480) # <== doubled\n .relu()\n .dense(N_CLASSES))\nwith Session() as session:\n pipeline = build_pipeline(preprocessors, session, make_network3(), make_adam(0.5e-3))\n learning_curve = train_evaluate(pipeline,epochs=20)\n session.save('checkpoint/network3_e_20_lr_0.5e-3.ckpt')\n\nshow_learning_curve(learning_curve)\nwith Session() as session:\n pipeline = build_pipeline(preprocessors, session, make_network3())\n session.load('checkpoint/network3_e_20_lr_0.5e-3.ckpt')\n pred = pipeline.predict(X_valid)\ncm = confusion_matrix(y_valid, pred)\nplot_confusion_matrix(cm)\nprint_confusion_matrix(cm)","sub_path":"networks/network3_lower_lr.py","file_name":"network3_lower_lr.py","file_ext":"py","file_size_in_byte":10274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"308191821","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/insights/parsers/tests/test_iscsiadm_mode_session.py\n# Compiled at: 2019-05-16 13:41:33\nfrom insights.parsers.iscsiadm_mode_session import IscsiAdmModeSession\nfrom insights.tests import context_wrap\nISCSIADM_SESSION_INFO = ('\\ntcp: [1] 10.72.32.45:3260,1 iqn.2017-06.com.example:server1 (non-flash)\\ntcp: [2] 10.72.32.45:3260,1 iqn.2017-06.com.example:server2 (non-flash)\\n').strip()\nEXPECTED_RESULTS = [\n {'IFACE_TRANSPORT': 'tcp', 'SID': '1', \n 'TARGET_IP': '10.72.32.45:3260,1', \n 'TARGET_IQN': 'iqn.2017-06.com.example:server1'},\n {'IFACE_TRANSPORT': 'tcp', 'SID': '2', \n 'TARGET_IP': '10.72.32.45:3260,1', \n 'TARGET_IQN': 'iqn.2017-06.com.example:server2'}]\n\ndef test_iscsiadm_session_info():\n iscsiadm_session_info = IscsiAdmModeSession(context_wrap(ISCSIADM_SESSION_INFO))\n assert iscsiadm_session_info.data == EXPECTED_RESULTS\n assert len(iscsiadm_session_info.data) == 2\n assert iscsiadm_session_info[0] == {'IFACE_TRANSPORT': 'tcp', \n 'SID': '1', \n 'TARGET_IP': '10.72.32.45:3260,1', \n 'TARGET_IQN': 'iqn.2017-06.com.example:server1'}\n assert iscsiadm_session_info[1]['TARGET_IQN'] == 'iqn.2017-06.com.example:server2'","sub_path":"pycfiles/insights_core-3.0.161-py2.7/test_iscsiadm_mode_session.py","file_name":"test_iscsiadm_mode_session.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"135619159","text":"\"\"\"\nsaves 64x64 pixel cutouts of each source in a Data Release as HDF5 files\n\"\"\"\n\nimport numpy as np\nimport os\nfrom glob import glob\nimport h5py\nimport pandas as pd\n\nfrom obiwan.qa.visual import readImage,sliceImage\nfrom obiwan.common import dobash\n\n# try:\nfrom astrometry.util.fits import fits_table\nfrom legacypipe.survey import LegacySurveyData, wcs_for_brick\nimport galsim\n# except ImportError:\n# pass\n\nHDF5_KEYS= ['g','r','z','gr','gz','rz','grz']\n\ndef flux2mag(nmgy):\n return -2.5 * (np.log10(nmgy) - 9)\n\nclass SimStamps(object):\n 
\"\"\"Object for extracting sim cutouts\n\n Args:\n ls_dir: LEGACY_SURVEY_DIR, like 'tests/end_to_end/testcase_DR5_grz'\n outdir: path to dir containing obiwan,coadd,tractor dirs\n \"\"\"\n\n def __init__(self,ls_dir=None,outdir=None,\n savedir=None, jpeg=False):\n \"\"\"outdir: required\n ls_dir: not needed if env var LEGACY_SURVEY_DIR already set\n savedir: where to write hdf5 files, outdir if None\n \"\"\"\n self.outdir= outdir\n self.jpeg= jpeg\n if ls_dir:\n os.environ[\"LEGACY_SURVEY_DIR\"]= ls_dir\n self.savedir= savedir\n if self.savedir is None:\n self.savedir= self.outdir\n self.survey = LegacySurveyData()\n\n def get_brickwcs(self,brick):\n brickinfo = self.survey.get_brick_by_name(brick)\n self.brickwcs = wcs_for_brick(brickinfo)\n\n def load_data(self,brick,cat_fn,coadd_dir):\n \"\"\"loads coadd and catalogue data\n\n Args:\n brick:\n coadd_dir: path/to/rs0, rs300, rs300_skipid, etc\n \"\"\"\n print('Loading ra,dec from %s' % (os.path.dirname(cat_fn)))\n self.cat= fits_table(cat_fn)\n\n print('Loading from %s' % coadd_dir)\n self.img_fits,self.ivar_fits= {},{}\n for b in self.bands:\n self.img_fits[b]= readImage(os.path.join(coadd_dir,\n 'legacysurvey-%s-image-%s.fits.fz' % (brick,b)))\n self.ivar_fits[b]= readImage(os.path.join(coadd_dir,\n 'legacysurvey-%s-invvar-%s.fits.fz' % (brick,b)))\n self.img_jpeg= readImage(os.path.join(coadd_dir,\n 'legacysurvey-%s-image.jpg' % (brick)),\n jpeg=True)\n # galsim.Image() so can determine overlap w/cutouts\n self.img_gs,self.ivar_gs,self.jpeg_gs= {},{},{}\n for ib,b in enumerate(self.bands):\n self.img_gs[b]= galsim.Image(self.img_fits[b])\n self.ivar_gs[b]= galsim.Image(self.ivar_fits[b])\n\n def extract(self,hw=32):\n \"\"\"For each id,x,y in self.cat, extracts image cutout\n\n Args:\n hw: half-width, pixels, (hw*2) x (hw*2) image cutout\n \"\"\"\n for cat in self.cat:\n xslc= slice(cat.x-hw,cat.x+hw)\n yslc= slice(cat.y-hw,cat.y+hw)\n # N x N x Number of bands\n test_img= galsim.Image(np.zeros((2*hw+1,2*hw+1)))\n # y,x because numpy indexing\n test_img.setCenter(cat.x,cat.y)\n olap= test_img.bounds & self.img_gs[self.bands[0]].bounds\n assert(olap.area() > 0)\n if olap.numpyShape() == test_img.array.shape:\n # Grab from fits image b/c aligned better\n _ = self.hdf5_obj.create_dataset(str(cat.id)+'/img',\n chunks=True, \\\n data= np.array([sliceImage(self.img_fits[band],\n xslice=xslc,yslice=yslc)\n for band in self.bands_str]).T)\n _ = self.hdf5_obj.create_dataset(str(cat.id)+'/ivar',\n chunks=True, \\\n data= np.array([sliceImage(self.ivar_fits[band],\n xslice=xslc,yslice=yslc)\n for band in self.bands_str]).T)\n\n _ = self.hdf5_jpeg.create_dataset(str(cat.id)+'/img',\n chunks=True,dtype=np.uint8, \\\n data= sliceImage(self.img_jpeg,\n xslice=xslc,yslice=yslc))\n # _ = self.hdf5_obj.create_dataset(str(cat.id)+'/img',\n #                                  chunks=True, \\\n #     data= np.array([self.img_fits[band][olap].array\n #                     for band in self.bands_str]).T)\n # _ = self.hdf5_obj.create_dataset(str(cat.id)+'/ivar',\n #                                  chunks=True, \\\n #     data= np.array([self.ivar_fits[band][olap].array\n #                     for band in self.bands_str]).T)\n\n else:\n # On edge\n # Note, galsim.Image() cannot be 3D\n img= [test_img.copy()]*len(self.bands)\n ivar= [test_img.copy()]*len(self.bands)\n for i,band in enumerate(self.bands_str):\n img[i][olap] += self.img_gs[band][olap]\n ivar[i][olap] += self.ivar_gs[band][olap]\n\n _ = self.hdf5_obj_onedge.create_dataset(str(cat.id)+'/img',\n chunks=True, \\\n data= np.array([d.array for d in img]).T)\n _ = self.hdf5_obj_onedge.create_dataset(str(cat.id)+'/ivar',\n 
chunks=True, \\\n data= np.array([d.array for d in ivar]).T)\n\n def run(self,brick,stampSize=64,applyCuts=True,zoom=None):\n \"\"\"Write the hdf5 image files for all rs/* in this brick\n\n Args:\n brick: brickname\n stampSize: height and width in pixes of training image\n zoom: if legacypipe was run with zoom option\n \"\"\"\n self.get_brickwcs(brick)\n # coadd fits images must exist\n self.set_paths_to_data(brick)\n coadd_fns= glob(os.path.join(self.coadd_dirs[0],\n '*-image-*.fits.fz'))\n if len(coadd_fns) == 0:\n raise IOError('no image.fits.fz file here: %s' % self.coadd_dirs[0])\n # set of bands in this brick\n self.bands= (pd.Series(coadd_fns)\n .str.replace('.fits.fz','')\n .str[-1].values)\n assert(self.bands.size > 0)\n # One hdf5 file for this brick, like '_rz.hdf5'\n self.bands_str= ''.join(sorted(self.bands))\n assert(self.bands_str in HDF5_KEYS)\n self.set_output_fns(brick,self.bands_str)\n\n if os.path.exists(self.hdf5_fn):\n try:\n f= h5py.File(self.hdf5_fn, 'r')\n f2= h5py.File(self.hdf5_fn_onedge, 'r')\n f3= h5py.File(self.hdf5_fn_jpeg, 'r')\n if (len(f.keys()) == 0) & (len(f2.keys()) == 0):\n # remove empty files then make them\n for fn in [self.hdf5_fn,self.hdf5_fn_onedge,self.hdf5_fn_jpeg]:\n dobash('rm %s' % fn)\n else:\n # processing done, skip this brick\n print('Skipping %s, hdf5 already filled: %s' % (brick,self.hdf5_fn))\n return None\n except OSError:\n # One of these got messed up, redo it\n for fn in [self.hdf5_fn,self.hdf5_fn_onedge,self.hdf5_fn_jpeg]:\n os.remove(fn)\n print('removed ',fn)\n self.hdf5_obj = h5py.File(self.hdf5_fn, \"w\")\n self.hdf5_obj_onedge = h5py.File(self.hdf5_fn_onedge, \"w\")\n self.hdf5_jpeg = h5py.File(self.hdf5_fn_jpeg, \"w\")\n # Many rs*/ dirs per brick\n for cat_fn,coadd_dir in zip(self.cat_fns,self.coadd_dirs):\n if (self.jpeg) & (os.path.basename(coadd_dir) != 'rs0'):\n print('jpeg=True, so skipping %s' % coadd_dir)\n continue\n self.load_data(brick,cat_fn,coadd_dir)\n self.set_xyid(zoom=zoom)\n if applyCuts:\n self.apply_cuts()\n self.write_mag_sorted_ids()\n self.extract(hw=int(stampSize/2))\n self.hdf5_obj.close()\n self.hdf5_obj_onedge.close()\n self.hdf5_jpeg.close()\n print('Wrote %s' % self.hdf5_fn)\n print('Wrote %s' % self.hdf5_fn_onedge)\n print('Wrote %s' % self.hdf5_fn_jpeg)\n\n def set_paths_to_data(self,brick):\n \"\"\"lists of catalogues filenames and coadd dirs\"\"\"\n search= os.path.join(self.outdir,'coadd',\n brick[:3],brick,'*rs*',\n 'legacysurvey-%s-ccds.fits' % brick)\n rs_dirs= glob(search)\n rs_dirs= [os.path.basename(os.path.dirname(a))\n for a in rs_dirs]\n if len(rs_dirs) == 0:\n raise IOError('No rs dirs here: %s' % search)\n self.cat_fns,self.coadd_dirs= \\\n zip(*[(os.path.join(self.outdir,'obiwan',\n brick[:3],brick,rs_dir,\n 'simcat-elg-%s.fits' % brick),\n os.path.join(self.outdir,'coadd',\n brick[:3],brick,rs_dir)\n )\n for rs_dir in rs_dirs])\n\n def set_output_fns(self,brick,bands_str):\n dr= os.path.join(self.savedir,'hdf5',\n brick[:3],brick)\n # hdf5\n self.hdf5_fn= os.path.join(dr,\n 'img_ivar_%s.hdf5' % bands_str)\n self.hdf5_fn_onedge= self.hdf5_fn.replace('.hdf5',\n '_onedge.hdf5')\n self.hdf5_fn_jpeg= self.hdf5_fn.replace('img_ivar_','jpeg_')\n # table of mag sorted ids\n self.sorted_ids_fn= self.hdf5_fn.replace(os.path.basename(self.hdf5_fn),\n 'sorted_ids.fits')\n try:\n dobash('mkdir -p %s' % dr)\n except ValueError:\n print('hdf5 dir already exists: ',dr)\n\n def set_xyid(self,zoom=None):\n _,x,y=self.brickwcs.radec2pixelxy(self.cat.ra,self.cat.dec)\n if zoom:\n x -= zoom[0]\n y 
-= zoom[2]\n self.cat.set('x',x.astype(int))\n self.cat.set('y',y.astype(int))\n assert('id' in self.cat.get_columns())\n\n def apply_cuts(self):\n len_bef=len(self.cat)\n print('After cut, have %d/%d' % (len(self.cat),len_bef))\n\n def write_mag_sorted_ids(self,band='g'):\n mag= self.get_mag(band)\n inds= np.argsort(mag) # small to large (mag, so brightest to faintest)\n T= fits_table()\n T.set('id',self.cat.id)\n T.set('mag_'+band,mag)\n T= T[inds]\n T.writeto(self.sorted_ids_fn)\n print('Wrote %s' % self.sorted_ids_fn)\n\n def get_mag(self,band='g'):\n return flux2mag(self.cat.get(band+'flux'))\n\n\n#######\n# Funcs to apply simulated source cuts to tractor catalogues sources\n\ndef get_xy_pad(slope,pad):\n \"\"\"Returns dx,dy\"\"\"\n theta= np.arctan(abs(slope))\n return pad*np.sin(theta), pad*np.cos(theta)\n\ndef y1_line(rz,pad=None):\n slope,yint= 1.15,-0.15\n if pad:\n dx,dy= get_xy_pad(slope,pad)\n return slope*(rz+dx) + yint + dy\n else:\n return slope*rz + yint\n\ndef y2_line(rz,pad=None):\n slope,yint= -1.2,1.6\n if pad:\n dx,dy= get_xy_pad(slope,pad)\n return slope*(rz-dx) + yint + dy\n else:\n return slope*rz + yint\n\ndef get_ELG_box(rz,gr, pad=None):\n \"\"\"\n Args:\n rz: r-z\n gr: g-r\n pad: magnitudes of padding to expand TS box\n \"\"\"\n x1,y1= rz,y1_line(rz)\n x2,y2= rz,y2_line(rz)\n x3,y3= np.array([0.3]*len(rz)),gr\n x4,y4= np.array([1.6]*len(rz)),gr\n if pad:\n dx,dy= get_xy_pad(1.15,pad)\n x1,y1= x1-dx,y1+dy\n dx,dy= get_xy_pad(-1.2,pad)\n x2,y2= x2+dx,y2+dy\n x3 -= pad\n x4 += pad\n return dict(x1=x1, y1=y1,\n x2=x2, y2=y2,\n x3=x3, y3=y3,\n x4=x4, y4=y4)\n#####\n\nclass TractorStamps(SimStamps):\n def __init__(self,ls_dir=None,outdir=None,\n savedir=None, jpeg=False):\n \"\"\"Same as SimStamps but for tractor catalogues\n\n Args:\n savedir: required for tractor not sims b/c cannot write to dr5 dir\n \"\"\"\n super(TractorStamps,self).__init__(ls_dir=ls_dir,\n outdir=outdir,\n savedir=savedir,\n jpeg=jpeg)\n\n def set_paths_to_data(self,brick):\n \"\"\"lists of catalogues filenames and coadd dirs\"\"\"\n self.cat_fns= [os.path.join(self.outdir,'tractor',\n brick[:3],\n 'tractor-%s.fits' % brick)]\n self.coadd_dirs= [os.path.join(self.outdir,'coadd',\n brick[:3],brick)]\n if ((not os.path.exists(self.cat_fns[0])) |\n (not os.path.exists(self.coadd_dirs[0]))):\n raise OSError('does not exist: %s OR %s' % \\\n (self.cat_fns[0],self.coadd_dirs[0]))\n\n def set_xyid(self,zoom=None):\n x,y=self.cat.bx,self.cat.by\n if zoom:\n x -= zoom[0]\n y -= zoom[2]\n self.cat.set('x',x.astype(int))\n self.cat.set('y',y.astype(int))\n self.cat.set('id',self.cat.objid)\n\n def get_mag(self,band='g'):\n return flux2mag(self.cat.get('flux_'+band)/self.cat.get('mw_transmission_'+band))\n\n\n def apply_cuts(self):\n # Need extinction correction mag and colors\n d= {}\n for b in 'grz':\n d[b]= flux2mag(self.cat.get('flux_'+b)/self.cat.get('mw_transmission_'+b))\n df= pd.DataFrame(d)\n df['g-r']= df['g'] - df['r']\n df['r-z']= df['r'] - df['z']\n\n hasGRZ= ((self.cat.brick_primary) &\n (self.cat.nobs_g >= 1) &\n (self.cat.nobs_r >= 1) &\n (self.cat.nobs_z >= 1))\n noArtifacts= ((self.cat.allmask_g == 0) &\n (self.cat.allmask_r == 0) &\n (self.cat.allmask_z == 0))\n\n keep= ((self.sim_sampling_cut(df)) &\n (self.isFaint_cut(df)) &\n #(noArtifacts) &\n (hasGRZ))\n\n len_bef= len(self.cat)\n self.cat.cut(keep)\n print('After cut, have %d/%d' % (len(self.cat),len_bef))\n\n\n def sim_sampling_cut(self,df):\n \"\"\"same cut applied to simulated sources\n\n Args:\n df: pd.DataFrame have tractor 
cat extinction corrected grz mags\n \"\"\"\n # TS box w/0.5 mag padding\n inBox= ((df['g-r'] <= y1_line(df['r-z'],pad=0.5)) &\n (df['g-r'] <= y2_line(df['r-z'],pad=0.5)) &\n (df['r-z'] >= 0.3 - 0.5) &\n (df['r-z'] <= 1.6 + 0.5))\n\n # Effective rhalf and drop comp, dev\n fwhm_or_rhalf= np.zeros(len(self.cat))-1 # arcsec\n isPSF= np.char.strip(self.cat.type) == 'PSF'\n isEXP= pd.Series(np.char.strip(self.cat.type)).isin(['EXP','REX'])\n isDEV= np.char.strip(self.cat.type) == 'DEV'\n isCOMP= np.char.strip(self.cat.type) == 'COMP'\n # rhalf ~ fwhm/2\n fwhm_or_rhalf[isPSF]= np.mean(np.array([self.cat[isPSF].psfsize_g,\n self.cat[isPSF].psfsize_r,\n self.cat[isPSF].psfsize_z]),axis=0)/2\n fwhm_or_rhalf[isEXP]= self.cat[isEXP].shapeexp_r\n fwhm_or_rhalf[isDEV]= self.cat[isDEV].shapedev_r\n\n grz_gt0= ((self.cat.flux_g > 0) &\n (self.cat.flux_r > 0) &\n (self.cat.flux_z > 0) &\n (self.cat.flux_ivar_g > 0) &\n (self.cat.flux_ivar_r > 0) &\n (self.cat.flux_ivar_z > 0))\n\n keep= ((grz_gt0) &\n (isCOMP == False) &\n (isDEV == False) &\n (fwhm_or_rhalf < 5))\n\n #Last cut\n rhalf_lim= (0.262/2,2.) # Camera, Data\n g,r,z= tuple(np.array([24.0,23.4,22.5])+0.5)\n bad= ((fwhm_or_rhalf < rhalf_lim[0]) |\n (fwhm_or_rhalf > rhalf_lim[1]) |\n (df['z'] >= z) | #beyond mag limit\n (df['r'] >= r) |\n (df['g'] >= g))\n\n return (inBox) & (keep) & (bad == False)\n\n\n\n def isFaint_cut(self,df):\n \"\"\"There are only faint sources in the deep2 matched sample,\n but in the tractor catalogus have a bright population presumably\n stars. Remove these\n\n Args:\n df: pd.DataFrame have tractor cat extinction corrected grz mags\n \"\"\"\n # \"elg_sample_5dim_10k.fits\"\n min_mag= {'r': 20.4659,\n 'z': 19.4391,\n 'g': 20.6766}\n keep= ((df['g'] >= min_mag['g']) &\n (df['r'] >= min_mag['r']) &\n (df['z'] >= min_mag['z']))\n return keep\n\nclass UserDefinedStamps(SimStamps):\n def __init__(self,ls_dir=None,outdir=None,\n savedir=None, jpeg=False):\n \"\"\"Same as SimStamps but for tractor catalogues\n\n Args:\n savedir: required for tractor not sims b/c cannot write to dr5 dir\n \"\"\"\n super().__init__(ls_dir=ls_dir,\n outdir=outdir,\n savedir=savedir,\n jpeg=jpeg)\n\n def set_paths_to_data(self,brick):\n \"\"\"lists of catalogues filenames and coadd dirs\"\"\"\n self.cat_fns= [os.path.join(self.savedir,'%s.fits' % brick)]\n self.coadd_dirs= [os.path.join(self.outdir,'coadd',\n brick[:3],brick)]\n if ((not os.path.exists(self.cat_fns[0])) |\n (not os.path.exists(self.coadd_dirs[0]))):\n raise OSError('does not exist: %s OR %s' % \\\n (self.cat_fns[0],self.coadd_dirs[0]))\n\n\ndef testcase_main():\n name= 'testcase_DR5_grz'\n obj='elg'\n onedge=False\n if '_grz' in name:\n brick='0285m165'\n zoom= [3077, 3277, 2576, 2776]\n else:\n brick='1741p242'\n zoom= [90, 290, 2773, 2973]\n\n repo_dir= '/home/kaylan/myrepo/obiwan/'\n ls_dir= os.path.join(repo_dir,\n 'tests/end_to_end',name)\n obj_dir= os.path.join(repo_dir,\n 'tests/end_to_end','out_'+name+'_'+obj)\n if onedge:\n obj_dir += '_onedge'\n\n Sim= SimcatStamps(ls_dir=ls_dir, obj_dir=obj_dir)\n for brick in [brick]:\n Sim.run(brick, zoom=zoom)\n\ndef mpi_main(nproc=1,which=None,\n outdir=None,ls_dir=None,savedir=None,\n jpeg=False,\n bricks=[]):\n \"\"\"\n\n Args:\n nproc: > 1 for mpi4py\n which: one of ['tractor','sim','userDefined']\n outdir: path to coadd,tractor dirs\n ls_dir: not needed if legacy_survey_dir env var already set\n savedir: where to write the hdf5 files, outdir if None\n jpeg: extract .jpg instead of .fits\n bricks: list bricks to make hdf5 cutouts 
from\n \"\"\"\n assert(which in ['tractor','sim','userDefined'])\n if nproc > 1:\n from mpi4py.MPI import COMM_WORLD as comm\n bricks= np.array_split(bricks, comm.size)[comm.rank]\n\n d= dict(outdir=outdir,ls_dir=ls_dir,\n savedir=savedir, jpeg=jpeg)\n kwargs={}\n if which == 'sim':\n Obj= SimStamps(**d)\n elif which == 'tractor':\n Obj= TractorStamps(**d)\n elif which == 'userDefined':\n Obj= UserDefinedStamps(**d)\n kwargs.update(stampSize=64,\n applyCuts=False)\n\n for brick in bricks:\n Obj.run(brick)\n\n\nif __name__ == '__main__':\n #testcase_main()\n from argparse import ArgumentParser\n parser = ArgumentParser()\n parser.add_argument('--which', type=str, choices=['tractor','sim','userDefined'], required=True, help='whether to make training hdf5 cutouts of real (DR5) or simulated (elg_dr5_coadds) outputs')\n parser.add_argument('--nproc', type=int, default=1, help='set to > 1 to run mpi4py')\n parser.add_argument('--bricks_fn', type=str, default=None, help='specify a fn listing bricks to run, or a single default brick will be ran')\n parser.add_argument('--savedir', type=str, default=None, help='specify a fn listing bricks to run, or a single default brick will be ran')\n parser.add_argument('--jpeg', action='store_true', default=False, help='put jpeg images in hdf5 file instead of fits coadss')\n args = parser.parse_args()\n\n # Data paths\n d= dict(savedir=args.savedir,\n jpeg=args.jpeg)\n if os.environ['HOME'] == '/home/kaylan':\n # ubuntu\n d.update(ls_dir=os.path.join(os.environ['HOME'],\n 'mydata/legacysurveydir'))\n if args.which == 'sim':\n d.update(outdir=os.path.join(os.environ['HOME'],\n 'mydata/elg_dr5_coadds'))\n elif args.which == 'tractor':\n d.update(outdir=os.path.join(os.environ['HOME'],\n 'mydata/dr5_cutouts'))\n if args.savedir is None:\n d.update(savedir=os.path.join(os.environ['HOME'],\n 'mydata/dr5_hdf5'))\n else:\n # nersc\n if args.which == 'sim':\n d.update(outdir=os.path.join(os.environ['CSCRATCH'],\n 'obiwan_out/elg_dr5_coadds'))\n elif args.which == 'tractor':\n d.update(outdir='/global/project/projectdirs/cosmo/data/legacysurvey/dr5')\n if not args.savedir:\n d.update(savedir=os.path.join(os.environ['CSCRATCH'],\n 'obiwan_out/dr5_hdf5'))\n elif args.which == 'userDefined':\n d.update(outdir='/global/project/projectdirs/cosmo/data/legacysurvey/dr5')\n if not args.savedir:\n d.update(savedir=os.path.join(os.environ['CSCRATCH'],\n 'obiwan_out/dr5_hdf5'))\n\n # Bricks to run\n if not args.bricks_fn:\n bricks= ['1211p060'] #['1126p220']\n else:\n bricks= np.loadtxt(args.bricks_fn,dtype=str)\n\n mpi_main(nproc=args.nproc,which=args.which,bricks=bricks,\n **d)\n","sub_path":"py/obiwan/dplearn/create_training.py","file_name":"create_training.py","file_ext":"py","file_size_in_byte":22304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"419734909","text":"#!/usr/bin/python\n\n#----------------------------------------------------------------------\n# Markdown to HTML Translator\n# Author: Ricky Dall'Armellina\n# Date: 09/3/2018\n#\n# Description: Translates a text file written in Markdown to a working\n# HTML document and saves it with the same name\n#----------------------------------------------------------------------\nDEBUG = False\n\nimport os\nimport markdown\nimport markdown_extensions\n\ndef LOG(string):\n if DEBUG:\n print(string)\n\ndef getFileName(fileName):\n #get a substring with just the file name, no extension\n name = fileName[:-3]\n #print(name)\n return name\n\ndef 
checkExistingFile(fileName):\n    htmlFileName = getFileName(fileName) + \".html\"\n    #check if the file exists and delete it\n    fileExists = os.path.isfile(htmlFileName)\n    if fileExists:\n        LOG(\"Removing previously created html file\")\n        os.remove(htmlFileName)\n    else:\n        LOG(\"HTML file doesn't exist, continuing...\")\n\ndef generateHTMLHeader(file, mode):\n    #generates html opening and closing elements\n    #takes the html file to add headers to and a mode as an argument\n    #mode 'o' (opening statements)\n    #mode 'c' (closing statements)\n    if mode == 'o':\n        headFile = open(\"html_head.txt\", \"r\", 0)\n        file.write(headFile.read())\n        headFile.close()\n    elif mode == 'c':\n        file.write(\"\\n\\n\")\n    return file\n\ndef createHTML(fileName):\n    #get name from getFileName() and create html file with that name\n    LOG(\"Creating HTML file...\")\n    htmlName = getFileName(fileName) + \".html\"\n    html = open(htmlName, \"w+\", 0)\n    html = generateHTMLHeader(html, 'o')\n    return html\n\ndef parseMarkdown(mFile, htmlFile):\n    #call Markdown library to open the textFile and read it\n    input_file = open(mFile, \"r\", 0)\n    text = input_file.read()\n    #input the contents of the markdown file into the html file\n    html = markdown.markdown(text, ['markdown_extensions'])\n    #write the converted html text to the html file\n    htmlFile.write(html)\n    return htmlFile\n\ndef markdown2html(inputFile):\n    LOG(\"- Markdown to HTML Python Translator -\\n\")\n    #create html file\n    htmlFile = createHTML(inputFile)\n    #parse and analyze the markdown file and obtain the html file\n    outputFile = parseMarkdown(inputFile, htmlFile)\n    generateHTMLHeader(outputFile, \"c\")\n    return outputFile\n","sub_path":"Tests/File copy and MD Checker/markdown2html.py","file_name":"markdown2html.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"42029970","text":"import os\n\npath = '/Users/alistairmackinnon/Desktop/trial'\n\npath = os.path.join(path, 'xx.txt')\n\nprint(path)\n\nfile = open(path, 'w')\nfor i in range(5):\n    file.write('Line x {}\\n'.format(i+1))\nfile.close()\n\nfile = open(path)\ncontent = file.read()\nfile.close()\n\nprint(content)\n\n\n\"\"\"\nfile = open(file, 'w')\nfile.write('Hello World')\nfile.close()\nfile = open(file)\nprint(file)\n\"\"\"\n\n\n\n\n\n","sub_path":"read_and_write.py","file_name":"read_and_write.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"73375349","text":"#!/usr/bin/env python\n\nfrom collections import Counter\nfrom itertools import permutations\n\naoc = open('inputs/day_02.txt').read().strip().splitlines()\nchallenge_input = map((lambda x: Counter(x)), aoc)\n\ntwice = 0\nthrice = 0\n\n\ndef only_1_diff(str1, str2):\n    found = False\n    for a, b in zip(str1, str2):\n        if a != b and found:\n            return False\n        elif a != b:\n            found = True\n\n    return found\n\n\ndef find_correct_pair():\n    for str1, str2 in permutations(aoc, 2):\n        if only_1_diff(str1, str2):\n            return str1, str2\n\n\nfor boxId in challenge_input:\n    double = [l for l, c in boxId.most_common() if c == 2]\n    triple = [l for l, c in boxId.most_common() if c == 3]\n\n    if double:\n        twice += 1\n\n    if triple:\n        thrice += 1\n\npart1 = twice * thrice\nprint('Day 02 part 1: %s' % part1)\n\nstr1, str2 = find_correct_pair()\ncommon = [x for x, y in zip(str1, str2) if x == y]\n\npart2 = ''.join(common)\nprint('Day 02 part 2: %s' % 
part2)\n\n","sub_path":"2018/day_02.py","file_name":"day_02.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"627586174","text":"import math\r\nwhile 1:\r\n\ttry:\r\n\t\tx = input()\r\n\t\tx = x.split(' ')\r\n\t\tx1 = int(x[0])\r\n\t\ty1 = int(x[1])\r\n\t\tx2 = int(x[2])\r\n\t\ty2 = int(x[3])\r\n\t\tv = int(x[4])\r\n\t\tr1 = int(x[5])\r\n\t\tr2 = int(x[6])\r\n\r\n\t\t#distance between the 2 points\r\n\t\tdab = math.sqrt((x2 - x1)**2+(y2 - y1)**2)\r\n\t\t\r\n\t\t#add to the distance the metres the opponent covers in a 1.5 second interval \r\n\t\tdab += v * 1.5\r\n\r\n\t\t#action radius of the attack \r\n\t\tra = r1 + r2\r\n\r\n\t\tif dab > float(ra):\r\n\t\t\tprint('N')\r\n\t\telse:\r\n\t\t\tprint('Y')\r\n\texcept:\r\n\t\tbreak\r\n","sub_path":"python/2203.py","file_name":"2203.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"490075229","text":"import requests\nimport json\n\n\nclass HueException(Exception):\n    pass\n\n\nclass Hue(object):\n    def __init__(self, **kwargs):\n        self.url = kwargs.pop('url', None)\n        self.api_key = kwargs.pop('api_key', None)\n\n    def process_errors(self, resp):\n        errors = [m['error']['description'] for m in resp if 'error' in m]\n        if errors:\n            raise HueException(\"\\n\".join(errors))\n\n    def get_light_id(self, **kwargs):\n        light_found = False\n        light_name = kwargs.pop('light_name', None)\n        url = \"%s/api/%s/lights\" % (self.url, self.api_key)\n        r = requests.get(url)\n        if r.status_code != 200:\n            raise HueException(\n                \"Received %s status code from url %s\" % (r.status_code, url))\n        self.process_errors(json.loads(r.text))\n        lights = json.loads(r.text)\n        for light in lights:\n            name = lights[light].get('name')\n            if name == light_name:\n                light_found = True\n                return light\n        if not light_found:\n            raise HueException('light not found')\n\n    def change_state(self, **kwargs):\n        light_id = kwargs.pop('light_id', None)\n        alert = kwargs.pop('alert', None)\n        payload = {}\n        on = kwargs.pop('on', None)\n        bri = kwargs.pop('brightness', None)\n        if alert:\n            payload['alert'] = 'lselect'\n        if on:\n            payload['on'] = on\n        if bri:\n            payload['bri'] = bri\n            payload['on'] = True\n        url = \"%s/api/%s/lights/%s/state\" % (self.url, self.api_key, light_id)\n        r = requests.put(url, data=json.dumps(payload))\n        if r.status_code != 200:\n            raise HueException(\n                \"Received %s status code from url %s\" % (r.status_code, url))\n        self.process_errors(json.loads(r.text))\n        success = [s['success'] for s in json.loads(r.text) if 'success' in s]\n        return success\n\n    def off(self, **kwargs):\n        light_id = kwargs.pop('light_id', None)\n        if not light_id:\n            light_name = kwargs.pop('light_name', None)\n            light_id = self.get_light_id(light_name=light_name)\n        self.change_state(on=False, light_id=light_id)\n\n    def on(self, **kwargs):\n        brightness = kwargs.pop('brightness', None)\n        alert = kwargs.pop('alert', None)\n        light_id = kwargs.pop('light_id', None)\n        if not light_id:\n            light_name = kwargs.pop('light_name', None)\n            light_id = self.get_light_id(light_name=light_name)\n        self.change_state(on=True, light_id=light_id, brightness=brightness,\n                          alert=alert)\n","sub_path":"hue.py","file_name":"hue.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"520280198","text":"#!/usr/bin/python\n\nimport threading\nfrom SocketServer import ThreadingMixIn\nfrom BaseHTTPServer import 
HTTPServer, BaseHTTPRequestHandler\nimport random\nimport string\nimport ssl\n\nclass Handler(BaseHTTPRequestHandler):\n\n def do_GET(self):\n self.send_response(200)\n self.end_headers()\n message = threading.currentThread().getName()\n msglen = int(random.random() * 100)\n message += ': ' + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(msglen))\n self.wfile.write(message)\n self.wfile.write('\\n')\n return\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n \"\"\"Handle requests in a separate thread.\"\"\"\n\ndef serve_on_port(port, ssl_on):\n server = ThreadedHTTPServer(('localhost', port), Handler)\n if ssl_on == True:\n server.socket = ssl.wrap_socket(server.socket, keyfile='key.pem', certfile='cert.pem', server_side=True)\n server.serve_forever()\n\nif __name__ == '__main__':\n threading.Thread(target=serve_on_port, args=[80, False]).start()\n threading.Thread(target=serve_on_port, args=[443, True]).start()\n \n","sub_path":"web-server.py","file_name":"web-server.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"527859170","text":"import scrapy\n\nclass BrickSpider(scrapy.Spider):\n name = \"brickset_spider\"\n start_urls = ['http://brickset.com/sets/year-2016']\n\n def parse(self, response):\n SET_SELECTOR = '.set'\n for bset in response.css(SET_SELECTOR):\n NAME_SELECTOR = 'h1 ::text'\n yield {\n 'name': bset.css(NAME_SELECTOR).extract_first()\n }","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"160613664","text":"import copy\nimport torch\nimport torch.nn as nn\n\nfrom agents.Base_Agent import Base_Agent\nfrom agents.DQN_agents.DQN import DQN\n\nfrom agents.DQN_agents.base_conv_net.mobilenet_v2 import MobileNetV2\nfrom nn_builder.pytorch.NN import NN\nfrom exploration_strategies.Epsilon_Greedy_Exploration import Epsilon_Greedy_Exploration\nfrom utilities.data_structures.Replay_Buffer import Replay_Buffer\nimport torch.optim as optim\n\nimport math\nimport os\nfrom environments.carla_enviroments.carla_config import base_config\n\nimport time\n\nclass DQN_With_Fixed_Q_Targets(DQN):\n \"\"\"A DQN agent that uses an older version of the q_network as the target network\"\"\"\n agent_name = \"DQN with Fixed Q Targets\"\n def __init__(self, config):\n DQN.__init__(self, config)\n self.q_network_target = self.create_NN(input_dim=self.state_size, output_dim=self.action_size)\n Base_Agent.copy_model_over(from_model=self.q_network_local, to_model=self.q_network_target)\n\n if config.resume:\n self.load_resume(config.resume_path)\n\n def learn(self, experiences=None):\n \"\"\"Runs a learning iteration for the Q network\"\"\"\n tic1 = time.time()\n super(DQN_With_Fixed_Q_Targets, self).learn(experiences=experiences)\n tic2 = time.time()\n self.soft_update_of_target_network(self.q_network_local, self.q_network_target,\n self.hyperparameters[\"tau\"]) # Update the target network\n tic3 = time.time()\n print('learn time:%.5f, soft copy:%.5f'%(tic2 - tic1, tic3 - tic2))\n\n def compute_q_values_for_next_states(self, next_states):\n \"\"\"Computes the q_values for next state we will use to create the loss to train the Q network\"\"\"\n Q_targets_next = self.q_network_target(next_states).detach().max(1)[0].unsqueeze(1)\n return Q_targets_next\n\n\n def load_resume(self, resume_path):\n save = torch.load(resume_path)\n 
q_network_local_dict = save['q_network_local']\n q_network_target_dict = save['q_network_target']\n self.q_network_local.load_state_dict(q_network_local_dict, strict=True)\n self.q_network_target.load_state_dict(q_network_target_dict, strict=True)\n self.logger.info('load resume model success...')\n\n\nclass q_network_2_EYE(nn.Module):\n def __init__(self, n_action):\n super(q_network_2_EYE, self).__init__()\n self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n self.backbone = MobileNetV2(width_mult=0.35, n_dim=128)\n\n ## action layer\n action_layer_config = {\"linear_hidden_units\": [128, 64, 32],\n \"final_layer_activation\": \"None\",\n \"batch_norm\": False}\n self.action_layer = self.create_NN(256, n_action, hyperparameters=action_layer_config)\n\n self._initialize_weights()\n\n def forward(self, state):\n tic1 = time.time()\n left_eye = state[..., :3].transpose(1, 3).transpose(2, 3).contiguous()\n right_eye = state[..., 3:].transpose(1, 3).transpose(2, 3).contiguous()\n tic2 = time.time()\n features_left = self.backbone(left_eye) ## shape is [bs, 256]\n features_right = self.backbone(right_eye) ## shape is [bs, 256]\n tic3 = time.time()\n features = torch.cat([features_left, features_right], dim=-1) ## shape is [bs, 512]\n tic4 = time.time()\n action_q = self.action_layer(features)\n # print('transpose:%.5fs, cnn:%.5fs, bp:%.5f'%(tic2 - tic1, tic3 - tic2, tic4 - tic3))\n return action_q\n\n def create_NN(self, input_dim, output_dim, key_to_use=None, override_seed=None, hyperparameters=None):\n \"\"\"Creates a neural network for the agents to use\"\"\"\n default_hyperparameter_choices = {\"output_activation\": None, \"hidden_activations\": \"relu\", \"dropout\": 0.0,\n \"initialiser\": \"he\", \"batch_norm\": False,\n \"columns_of_data_to_be_embedded\": [],\n \"embedding_dimensions\": [], \"y_range\": ()}\n\n for key in default_hyperparameter_choices:\n if key not in hyperparameters.keys():\n hyperparameters[key] = default_hyperparameter_choices[key]\n\n return NN(input_dim=input_dim, layers_info=hyperparameters[\"linear_hidden_units\"] + [output_dim],\n output_activation=hyperparameters[\"final_layer_activation\"],\n batch_norm=hyperparameters[\"batch_norm\"], dropout=hyperparameters[\"dropout\"],\n hidden_activations=hyperparameters[\"hidden_activations\"], initialiser=hyperparameters[\"initialiser\"],\n columns_of_data_to_be_embedded=hyperparameters[\"columns_of_data_to_be_embedded\"],\n embedding_dimensions=hyperparameters[\"embedding_dimensions\"], y_range=hyperparameters[\"y_range\"],\n random_seed=1)\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\nclass DQN_With_Fixed_Q_Targets_2_EYE(DQN_With_Fixed_Q_Targets):\n\n agent_name = \"DQN_TWO_EYES\"\n def __init__(self, config):\n Base_Agent.__init__(self, config)\n base_config.no_render_mode = False ## must be render mode\n\n self.q_network_local = q_network_2_EYE(n_action=self.get_action_size())\n self.q_network_target = q_network_2_EYE(n_action=self.get_action_size())\n self.q_network_optimizer = optim.SGD(self.q_network_local.parameters(),\n lr=self.hyperparameters[\"learning_rate\"], weight_decay=5e-4)\n\n self.memory = Replay_Buffer(self.hyperparameters[\"buffer_size\"], self.hyperparameters[\"batch_size\"],\n config.seed)\n self.exploration_strategy = Epsilon_Greedy_Exploration(config)\n\n if config.backbone_pretrain:\n self.load_pretrain()\n\n self.copy_model_over(from_model=self.q_network_local, to_model=self.q_network_target)\n\n self.q_network_local.to(self.q_network_local.device)\n self.q_network_target.to(self.q_network_target.device)\n\n def load_pretrain(self):\n pretrain_model_path = os.path.join(os.path.dirname(__file__), 'base_conv_net/pretrain/mobilenetv2_0.35-b2e15951.pth')\n net_dict = self.q_network_local.state_dict()\n if not torch.cuda.is_available():\n pretrain_dict = torch.load(pretrain_model_path, map_location='cpu')\n else:\n pretrain_dict = torch.load(pretrain_model_path)\n # print(net_dict.keys())\n # print(pretrain_dict.keys())\n\n load_dict = {('backbone.' + k): v for k, v in pretrain_dict.items() if\n ('backbone.' + k) in net_dict}\n net_dict.update(load_dict)\n self.q_network_local.load_state_dict(net_dict, strict=True)\n print(f'load keys:{load_dict.keys()}')\n self.logger.info(f'load keys:{load_dict.keys()}')\n\n def update_learning_rate(self, starting_lr, optimizer):\n \"\"\"Lowers the learning rate according to how close we are to the solution\"\"\"\n if self.episode_number >= self.total_episode / 3:\n new_lr = starting_lr / 10.\n elif self.episode_number >= self.total_episode * 2 / 3:\n new_lr = starting_lr / 100.\n else:\n new_lr = starting_lr\n\n for g in optimizer.param_groups:\n g['lr'] = new_lr\n\n self.logger.info(\"Learning rate {}\".format(new_lr))\n\n\nif __name__ == '__main__':\n from utilities.data_structures.Config import Config\n import gym\n ## envs import ##\n from environments.carla_enviroments import env_v1_ObstacleAvoidance\n\n # net = q_network_toa(n_action=4)\n # net.to('cuda')\n # input = torch.rand(size=(10, 3, 224, 224)).to('cuda')\n # q1, q2 = net(input)\n\n config = Config()\n config.seed = 1\n config.environment = gym.make(\"ObstacleAvoidance-v0\")\n config.num_episodes_to_run = 2000\n config.file_to_save_data_results = \"C:/my_project/Deep-Reinforcement-Learning-Algorithms-with-PyTorch/results/data_and_graphs/carla_obstacle_avoidance/data.pkl\"\n config.file_to_save_results_graph = \"C:/my_project/Deep-Reinforcement-Learning-Algorithms-with-PyTorch/results/data_and_graphs/carla_obstacle_avoidance/data.png\"\n config.show_solution_score = False\n config.visualise_individual_results = True\n config.visualise_overall_agent_results = True\n config.standard_deviation_results = 1.0\n config.runs_per_agent = 1\n config.use_GPU = True\n config.overwrite_existing_results_file = False\n config.randomise_random_seed = True\n config.save_model = True\n\n config.resume = False\n config.resume_path = ''\n 
config.backbone_pretrain = True\n\n config.hyperparameters = {\n \"learning_rate\": 1e-2 * 10.,\n \"batch_size\": 32,\n \"buffer_size\": 20000,\n \"epsilon\": 1.0,\n \"epsilon_decay_rate_denominator\": 1.0,\n \"discount_rate\": 0.99,\n \"tau\": 0.01,\n \"alpha_prioritised_replay\": 0.6,\n \"beta_prioritised_replay\": 0.1,\n \"incremental_td_error\": 1e-8,\n \"update_every_n_steps\": 1,\n \"linear_hidden_units\": [24, 48, 24],\n \"final_layer_activation\": \"None\",\n \"batch_norm\": False,\n \"gradient_clipping_norm\": 0.1,\n \"learning_iterations\": 1,\n \"clip_rewards\": False\n }\n\n dqn_net = DQN_With_Fixed_Q_Targets_2_EYE(config)\n # left_input = torch.rand(size=(5, 3, 224, 224)).to('cuda')\n # right_input = torch.rand(size=(5, 3, 224, 224)).to('cuda')\n # out1 = dqn_net.q_network_local(left_input, right_input)\n # out2 = dqn_net.q_network_target(left_input, right_input)\n pass\n\n\n","sub_path":"agents/DQN_agents/DQN_With_Fixed_Q_Targets.py","file_name":"DQN_With_Fixed_Q_Targets.py","file_ext":"py","file_size_in_byte":10242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"396488177","text":"# asks user for units\nunits = input(\"Please input your units: usc or metric\")\n# asks user for miles and gallons and then converts to other units\nif units == \"usc\":\n miles = float(input(\"How many miles did you drive?\"))\n gallons = float(input(\"How many gallons of gas did you use?\"))\n km = miles * 1.60934\n liters = gallons * 3.78541\n mpg = miles / gallons\n cm = 100 * liters / km\n# asks user for kilometers and liters and then converts to other units\nelif units == \"metric\":\n km = float(input(\"How many kilometers did you drive?\"))\n liters = float(input(\"How many liters of gas did you use?\"))\n miles = km * 0.621371\n gallons = liters * 0.264172\n mpg = miles / gallons\n cm = 100 * liters / km\n# determines the consumption category\nif cm > 20:\n cc = \"Extremely poor\"\nelif cm <= 20 and cm > 15:\n cc = \"Poor\"\nelif cm <= 15 and cm > 10:\n cc = \"Average\"\nelif cm <= 10 and cm > 8:\n cc = \"Good\"\nelse:\n cc = \"Excellent\"\n# prints results of the test\nprint(\" USC Metric\")\nprint(\"Distance ______________:\" , format(miles , '10.3f') , \"miles \" , format(km , '10.3f') , \"Km\")\nprint(\"Gas ___________________:\" , format(gallons , '10.3f') , \"gallons\" , format(liters , '10.3f'), \"Liters\")\nprint(\"Consumption ___________:\" , format(mpg , '10.3f') , \"mpg \" , format(cm , '10.3f') , \"l/100Km\")\nprint(\" \")\nprint(\"Gas Consumption Rating :\" , cc)","sub_path":"f2016_cs8_jas493_a1/f2016_cs8_jas493_a1.py","file_name":"f2016_cs8_jas493_a1.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"402352359","text":"from utils import *\n\ninp = get_input(2020, 18)\n\nexample = \"2 * 3 + (4 * 5)\"\n\ndef chunks(l):\n chunks = []\n while l:\n if l[0] != \"(\":\n print(chunks)\n chunks.append(l[0])\n l = l[2:]\n continue\n\n depth = 0\n s = \"\"\n for ix, c in enumerate(l):\n s += c\n if c == \"(\":\n depth += 1\n elif c == \")\":\n depth -= 1\n if depth == 0:\n chunks.append(s[1:-1])\n l = l[len(s):].strip()\n break\n return chunks\n\ndef evaluate(ast):\n if len(ast) == 1:\n return int(ast)\n\n left, op, right = ast\n if op == \"*\":\n res = evaluate(left) * evaluate(right)\n elif op == \"+\":\n res = evaluate(left) + evaluate(right)\n else:\n raise Exception(\"Invalid op: \" + op)\n\n return res\n\ndef hack(expr):\n 
return expr[::-1].replace(\"(\", \"!\").replace(\")\", \"(\").replace(\"!\", \")\")\n\ndef listfind(l, item):\n    for ix, v in enumerate(l):\n        if v == item:\n            return ix\n    return -1\n\ndef ev2(l):\n    if l.isdigit():\n        return int(l)\n\n    cs = chunks(l)\n    while len(cs) > 1:\n        opIx = listfind(cs, \"+\")\n        if opIx == -1:\n            opIx = listfind(cs, \"*\")\n\n        left = ev2(cs[opIx-1])\n        op = cs[opIx]\n        right = ev2(cs[opIx+1])\n        res = (left + right) if op == \"+\" else (left * right)\n\n        cs = cs[:opIx-1] + [str(res)] + cs[opIx+2:]\n\n    return ev2(cs[0])\n\n\nprint(sum([ev2(l) for l in inp.split(\"\\n\")]))\n","sub_path":"2020/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"453653833","text":"# Euler 36 : Double-base palindromes\n# Link : https://projecteuler.net/problem=36\n\n#sum of all numbers less than 1,000,000 that are palindromic in base 10 and base 2\n\ntotal = 0\nfor num in range(1,1000001,2):\n\n\tif str(num) == str(num)[::-1]:\n\t\tx = bin(num).replace(\"0b\",\"\")\n\t\tif str(x) == str(x)[::-1]:\n\t\t\ttotal = total + num\n\nprint(total)","sub_path":"problem_036/solution_1.py","file_name":"solution_1.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"279127593","text":"#NEW WINDOW IN TKINTER\r\n\r\n\r\n\r\n#Import the library\r\nfrom tkinter import *\r\nfrom PIL import ImageTk, Image\r\nfrom tkinter import messagebox\r\n\r\nroot = Tk()\r\nroot.title(\"Code\")\r\nroot.iconbitmap('a.ico')\r\n\r\ntop1 =Toplevel()\r\n#Define the function for the open action\r\n\r\n#Note: the image must be assigned to a global variable for it to work\r\ndef open():\r\n    global my_img\r\n    top1 = Toplevel()\r\n    top1.title(\"Titulo de Segunda Ventana\")\r\n    my_img = ImageTk.PhotoImage(Image.open(\"img2.jpg\"))\r\n    my_label = Label (top1, image= my_img).pack()\r\n    btn2= Button(top1, text=\"Close Window\", command= top1.destroy).pack()\r\n\r\n\r\n\r\n\r\n#Create the button\r\nbtn = Button(top1, text=\"Open Second Window\", command = open).pack()
\r\n\r\n\r\n#Example of creating a new window\r\ntop = Toplevel()\r\ntop.title(\"Titulo de Segunda Ventana\")\r\n#Place a label in the new window\r\n#lbl = Label(top, text= \"Hola Mundo\").pack()\r\n\r\n\r\n#Place an image in the new window\r\nmy_img = ImageTk.PhotoImage(Image.open(\"img1.jpg\"))\r\nmy_label = Label (top, image= my_img).pack()\r\n\r\n#Infinite loop\r\nroot.mainloop()","sub_path":"PYTHON TKINTER -FREECODE CAMP/07_newwindow.py","file_name":"07_newwindow.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"454304274","text":"from typing import List\n\nimport pandas as pd\n\nimport datetime\n\nCONFIRMED_CASES_URL = f\"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data\" \\\nf\"/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv \"\n\nconfirmed_cases = pd.read_csv(CONFIRMED_CASES_URL, error_bad_lines=False)\n\n\ndef poland_cases_by_date(day, month, year: int = 2020) -> int:\n    d = datetime.date(year,month,day)\n    d1 = d.strftime('%m/%d/%y').lstrip(\"0\").replace(\" 0\", \" \").replace(\"/0\",\"/\")\n    polska = confirmed_cases.loc[confirmed_cases[\"Country/Region\"]==\"Poland\"]\n    result = polska[d1].values[0]\n    return result\n\ndef top5_countries_by_date(day: int, month: int, year: int = 2020) -> List[str]:\n\n    d = datetime.date(year, month, day)\n\n    d1 = d.strftime('%m/%d/%y').lstrip(\"0\").replace(\" 0\", \" \").replace(\"/0\",\"/\")\n\n    countries = confirmed_cases[[\"Country/Region\", d1]].groupby([\"Country/Region\"]).sum().sort_values(by=d1, ascending=False).head(5)\n\n    return list(countries.index)\n\n\n\ndef no_new_cases_count(day: int, month: int, year: int = 2020) -> int:\n    \n    d = datetime.date(year, month, day)\n\n    d2 = d.strftime('%m/%d/%y').lstrip(\"0\").replace(\" 0\", \" \").replace(\"/0\",\"/\")\n\n    wczoraj = d - datetime.timedelta(days=1)\n\n    wczoraj_str = wczoraj.strftime('%m/%d/%y').lstrip(\"0\").replace(\" 0\",\" \").replace(\"/0\",\"/\")\n\n    return len(confirmed_cases.loc[confirmed_cases[d2] - confirmed_cases[wczoraj_str]!=0].index)\n","sub_path":"hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"578715302","text":"import db_manager\nfrom jogador import Jogador\n\ndb = db_manager.DBManager(\"tictactoe.db\")\n\n# jogador = Jogador(usuario=\"eduardo\",\n#                   cpf=\"99999999999\",\n#                   email=\"eduard@gmail.com\",\n#                   senha=\"12344321\")\n\n# db.cadastro(jogador)\n\njogador = db.login(\"eduardo\", \"12344321\")\njogador.partidas += 1\njogador.vitorias += 1\n\nif db.update(jogador):\n    print(\"update 1 realizado com sucesso\")\n\njogador2 = db.login(\"pedro\", \"12344321\")\njogador2.partidas += 1\njogador2.empates += 1\n\nif db.update(jogador2):\n    print(\"update 2 realizado com sucesso\")\n\ndb.disconnect()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"481337450","text":"#!/usr/bin/env python\n# -*-coding:utf-8 -*-\n\"\"\"\nCreated on 2018/8/9\n@author: wanju Sun\n@group : \n@contact: \n\"\"\"\nfrom mrjob.job import MRJob\nimport os\nimport re\n\nclass WordCntPerDoc(MRJob):\n\n    def mapper(self, _, line):\n        input_file = os.environ['mapreduce_map_input_file']\n        docName = os.path.splitext(input_file.split('/')[-1])[0]\n        stopWords = set('is a not at if else 
then in and but I am we they the are to there that this'.split())\n words = [word.lower() for word in line.split(' ') if word.lower() not in stopWords and word]\n for word in words:\n #key1 = '{word},{doc}'.format(word=word, doc=docName)\n yield '%s,%s' % (word, docName), 1\n\n def reducer(self, key, values):\n yield key, sum(values)\n\n\nif __name__ == \"__main__\":\n WordCntPerDoc.run()","sub_path":"AiInsight/datamining/td-idf/WordCntPerDoc.py","file_name":"WordCntPerDoc.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"326599307","text":"import logging\nimport traceback\n\nimport pytest\nfrom _pytest_art.marks import (\n network,\n sla,\n storage,\n coresystem,\n virt,\n upgrade,\n storages,\n)\nfrom _pytest_art.testlogger import TestFlowInterface\nfrom art.test_handler.exceptions import TearDownException\nfrom art.test_handler.settings import ART_CONFIG\n\n\nlogger = logging.getLogger(__name__)\ntestflow = TestFlowInterface\n\nSTORAGE_TYPE = ART_CONFIG['RUN'].get('storage_type')\nNOT_APPLICABLE = 'N/A'\n\n\n# @storages decorator define all storage types available that test can run\n# with. NEVER change it here, but in child class, our plugin count that base\n# class is decorated with NOT_APPLICABLE.\n@storages((NOT_APPLICABLE,))\n@pytest.mark.usefixtures('storage')\nclass BaseTestCase(object):\n \"\"\"\n Base test case class for unittest testing\n \"\"\"\n test_failed = False\n\n @property\n def __name__(self):\n return traceback.extract_stack(None, 2)[0][2]\n\n # Invokes storage fixture before all fixtures to set storage.\n @pytest.fixture(autouse=True, scope='class')\n def storage_setup(request, storage):\n pass\n\n @classmethod\n def teardown_exception(cls):\n try:\n if cls.test_failed:\n raise TearDownException(\"TearDown failed with errors\")\n finally:\n cls.test_failed = False\n\n # current storage type on run time\n storage = None\n\n\n@pytest.mark.usefixtures(\"reset_object\")\n@storage\n@storages(set(ART_CONFIG['RUN']['storages']))\nclass StorageTest(BaseTestCase):\n \"\"\"\n Basic class for storage tests\n \"\"\"\n # STORAGE_TYPE value sets type of storage when running\n # without the --with-multiplier flag\n storage = STORAGE_TYPE if STORAGE_TYPE != \"none\" else 'iscsi'\n\n\n@network\nclass NetworkTest(BaseTestCase):\n \"\"\"\n Basic class for network tests\n \"\"\"\n\n\n@virt\nclass VirtTest(BaseTestCase):\n \"\"\"\n Basic class for compute/virt tests\n \"\"\"\n\n\n@sla\nclass SlaTest(BaseTestCase):\n \"\"\"\n Basic class for compute/sla tests\n \"\"\"\n\n\n@coresystem\nclass CoreSystemTest(BaseTestCase):\n \"\"\"\n Basic class for core system tests\n \"\"\"\n\n\n@upgrade\nclass UpgradeTest(BaseTestCase):\n \"\"\"\n Basic class for upgrade test\n \"\"\"\n","sub_path":"art/unittest_lib/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"520731974","text":"from ROOT import *\n\n# janky methods for mapping the samples cross sections, the sample's small3 tree, and the sample's treeChecker tree\n# John Hakala 5/11/2016\n\ndef getDDPrefix():\n return \"ddTree_\"\n\ndef getSmallPrefix():\n return \"smallified_\"\n\ndef getMCbgSampleKfactors():\n sampleXsects = {}\n sampleXsects[ \"gJets100To200.root\" ] = 1.6*0.8\n sampleXsects[ \"gJets200To400.root\" ] = 1.6*0.8\n sampleXsects[ \"gJets400To600.root\" ] = 1.4*0.8\n sampleXsects[ \"gJets600ToInf.root\" ] = 
1.0*0.8\n sampleXsects[ \"qcd300to500.root\" ] = .7 *0.8\n sampleXsects[ \"qcd500to700.root\" ] = .7 *0.8\n sampleXsects[ \"qcd700to1000.root\" ] = .7 *0.8\n sampleXsects[ \"qcd1000to1500.root\" ] = .7 *0.8\n sampleXsects[ \"qcd1500to2000.root\" ] = .7 *0.8\n sampleXsects[ \"qcd2000toInf.root\" ] = .7 *0.8\n #sampleXsects[ \"qcd200to300.root\" ] = 1 \n #sampleXsects[ \"dyJetsQQ-180.root\" ] = 1.23 \n #sampleXsects[ \"wJetsQQ-180.root\" ] = 1.21*0.8 \n return sampleXsects\n\ndef getMCbgSampleXsects():\n kFactors = getMCbgSampleKfactors()\n sampleXsects = {}\n sampleXsects[ \"gJets100To200.root\" ] = 9238\n sampleXsects[ \"gJets200To400.root\" ] = 2305\n sampleXsects[ \"gJets400To600.root\" ] = 274.4\n sampleXsects[ \"gJets600ToInf.root\" ] = 93.46 \n sampleXsects[ \"qcd300to500.root\" ] = 347700 \n sampleXsects[ \"qcd500to700.root\" ] = 32100 \n sampleXsects[ \"qcd700to1000.root\" ] = 6831 \n sampleXsects[ \"qcd1000to1500.root\" ] = 1207 \n sampleXsects[ \"qcd1500to2000.root\" ] = 119.9 \n sampleXsects[ \"qcd2000toInf.root\" ] = 25.24 \n #sampleXsects[ \"qcd200to300.root\" ] = 1712000 \n #sampleXsects[ \"dyJetsQQ-180.root\" ] = 1187 \n #sampleXsects[ \"wJetsQQ-180.root\" ] = 95.14 \n return sampleXsects\n\ndef getMCbgSampleEvents(small3Dir):\n sampleXsects=getMCbgSampleXsects()\n sampleEvents = {}\n for key in sampleXsects:\n mcBGfileName = \"%s/%s%s\" % (small3Dir, getSmallPrefix(), key)\n #print \"the small3 input filename is: %s\" % mcBGfileName\n mcBGfile = TFile( mcBGfileName )\n #print mcBGfile\n hCounter = mcBGfile.Get(\"ntuplizer/hCounter\")\n nEvents = hCounter.GetBinContent(1)\n sampleEvents[key]=nEvents;\n return sampleEvents\n\ndef getSignalsToInclude():\n return [ \"sig_m750.root\",\n \"sig_m850.root\",\n \"sig_m1000.root\",\n \"sig_m1150.root\",\n \"sig_m1300.root\",\n \"sig_m1450.root\",\n \"sig_m1600.root\",\n \"sig_m1750.root\",\n \"sig_m1900.root\",\n \"sig_m2050.root\",\n \"sig_m2450.root\",\n \"sig_m2850.root\",\n \"sig_m3250.root\",\n ]\n\ndef getWeightsDict(bkgSmall3Dir):\n sampleKfactors = getMCbgSampleKfactors() \n sampleXsects = getMCbgSampleXsects() \n sampleEvents = getMCbgSampleEvents(bkgSmall3Dir)\n\n lumi = 35900\n\n sampleWeights = {}\n for key in sampleXsects:\n expectedEvents = lumi*sampleXsects[key]\n weight = sampleKfactors[key]*expectedEvents/sampleEvents[key]\n sampleWeights[key] = (weight, \"bkg\")\n signalWeight = .5\n #for signalToInclude in getSignalsToInclude():\n # sampleWeights[signalToInclude] = signalWeight\n sampleWeights[\"data2016SinglePhoton.root\"] = (1 , \"data\")\n sampleWeights[ \"sig_m750.root\" ] = (.8*0.4, \"sig\")\n sampleWeights[ \"sig_m850.root\" ] = (.8*0.4, \"sig\")\n sampleWeights[ \"sig_m1000.root\" ] = (.7*0.4, \"sig\")\n sampleWeights[ \"sig_m1150.root\" ] = (.7*0.4, \"sig\")\n sampleWeights[ \"sig_m1300.root\" ] = (.7*0.4, \"sig\")\n sampleWeights[ \"sig_m1450.root\" ] = (.6*0.4, \"sig\")\n sampleWeights[ \"sig_m1600.root\" ] = (.6*0.4, \"sig\")\n sampleWeights[ \"sig_m1750.root\" ] = (.6*0.4, \"sig\")\n sampleWeights[ \"sig_m1900.root\" ] = (.5*0.4, \"sig\")\n sampleWeights[ \"sig_m2050.root\" ] = (.5*0.4, \"sig\")\n sampleWeights[ \"sig_m2450.root\" ] = (.5*0.4, \"sig\")\n sampleWeights[ \"sig_m2850.root\" ] = (.4*0.4, \"sig\")\n sampleWeights[ \"sig_m3250.root\" ] = (.4*0.4, \"sig\")\n return sampleWeights\n\ndef getMCbgWeightsDict(bkgSmall3Dir):\n weights = getWeightsDict(bkgSmall3Dir) \n nonMCbgs = getSignalsToInclude()\n nonMCbgs.append(\"data2016SinglePhoton.root\")\n for nonMCbg in nonMCbgs:\n weights.pop(nonMCbg)\n 
return weights\n\ndef getMCbgOrderedList():\n return [ \n #\"dyJetsQQ-180.root\" ,\n #\"wJetsQQ-180.root\" ,\n \"qcd2000toInf.root\" ,\n \"qcd1500to2000.root\" ,\n \"qcd1000to1500.root\" ,\n \"qcd700to1000.root\" ,\n \"qcd500to700.root\" ,\n \"qcd300to500.root\" ,\n #\"qcd200to300.root\" ,\n \"gJets600ToInf.root\" ,\n \"gJets400To600.root\" ,\n \"gJets200To400.root\" ,\n \"gJets100To200.root\" \n ]\n\ndef getMCbgColors():\n color = TColor()\n sampleColors = {}\n #sampleColors[\"QCD_HT100to200.root\" ] = color.GetColor(.1, 0.3, 0.25)\n sampleColors[\"gJets100To200.root\" ] = color.GetColor(.475*1.1, .6*1.2, 1.0)\n sampleColors[\"gJets200To400.root\" ] = color.GetColor(.475, .6, 1.0)\n sampleColors[\"gJets400To600.root\" ] = color.GetColor(.35, .5, 0.85)\n sampleColors[\"gJets600ToInf.root\" ] = color.GetColor(.225, .3, 0.7) \n #sampleColors[\"qcd200to300.root\" ] = color.GetColor(.31*1.2, 1.0, 0.425*1.2)\n sampleColors[\"qcd300to500.root\" ] = color.GetColor(.31, .95, 0.425)\n sampleColors[\"qcd500to700.root\" ] = color.GetColor(.28, .9, 0.4)\n sampleColors[\"qcd700to1000.root\" ] = color.GetColor(.25, .8, 0.375)\n sampleColors[\"qcd1000to1500.root\" ] = color.GetColor(.22, .7, 0.35)\n sampleColors[\"qcd1500to2000.root\" ] = color.GetColor(.19, .6, 0.325)\n sampleColors[\"qcd2000toInf.root\" ] = color.GetColor(.16, .5, 0.3)\n #sampleColors[\"dyJetsQQ-180.root\" ] = color.GetColor(.6, .2, .2)\n #sampleColors[\"wJetsQQ-180.root\" ] = color.GetColor(.85, .85, 0.3)\n return sampleColors\n\ndef getMCbgLabels():\n color = TColor()\n legendLabels = {}\n legendLabels[\"gJets100To200.root\" ] = \"#gamma#plusjets[100,200]\"\n legendLabels[\"gJets200To400.root\" ] = \"#gamma#plusjets[200,400]\"\n legendLabels[\"gJets400To600.root\" ] = \"#gamma#plusjets[400,600]\"\n legendLabels[\"gJets600ToInf.root\" ] = \"#gamma#plusjets[600,#infty]\"\n #legendLabels[\"qcd200to300.root\" ] = \"QCD[200,300]\"\n legendLabels[\"qcd300to500.root\" ] = \"QCD[300,500]\"\n legendLabels[\"qcd500to700.root\" ] = \"QCD[500,700]\"\n legendLabels[\"qcd700to1000.root\" ] = \"QCD[700,1000]\"\n legendLabels[\"qcd1000to1500.root\" ] = \"QCD[1000,1500]\"\n legendLabels[\"qcd1500to2000.root\" ] = \"QCD[1500,2000]\"\n legendLabels[\"qcd2000toInf.root\" ] = \"QCD[2000,#infty]\"\n #legendLabels[\"dyJetsQQ-180.root\" ] = \"DY#plusjets[180,#infty]\"\n #legendLabels[\"wJetsQQ-180.root\" ] = \"W#plusjets[600,#infty]\"\n #legendLabels[\"QCD_HT100to200\" ] = \"QCD[100,200]\"\n return legendLabels\n\ndef getSmall3ddTreeDict(ddDir):\n s3dd = {}\n \n #s3dd[\"QCD_HT100to200.root\" ] = \"%s/ddTree_QCD_HT100to200.root\"%ddDir\n #s3dd[\"gJets100To200\" ] = \"%s/ddTree_GJets100-200\" % ddDir\n s3dd[\"gJets200To400.root\" ] = \"%s/ddTree_gJets200To400\" % ddDir\n s3dd[\"gJets400To600.root\" ] = \"%s/ddTree_gJets400To600\" % ddDir\n s3dd[\"gJets600ToInf.root\" ] = \"%s/ddTree_gJets600ToInf\" % ddDir\n #s3dd[\"qcd200to300.root\" ] = \"%s/ddTree_qcd200to300\" % ddDir\n s3dd[\"qcd300to500.root\" ] = \"%s/ddTree_qcd300to500\" % ddDir\n s3dd[\"qcd500to700.root\" ] = \"%s/ddTree_qcd500to700\" % ddDir\n s3dd[\"qcd700to1000.root\" ] = \"%s/ddTree_qcd700to1000\" % ddDir\n s3dd[\"qcd1000to1500.root\" ] = \"%s/ddTree_qcd1000to1500\" % ddDir\n s3dd[\"qcd1500to2000.root\" ] = \"%s/ddTree_qcd1500to2000\" % ddDir\n s3dd[\"qcd2000toInf.root\" ] = \"%s/ddTree_qcd2000toInf\" % ddDir\n #s3dd[\"dyJetsQQ-180.root\" ] = \"%s/ddTree_dyJetsQQ-180\" % ddDir\n #s3dd[\"wJetsQQ-180.root\" ] = \"%s/ddTree_wJetsQQ-180\" % ddDir\n\n return 
s3dd\n","sub_path":"getMCbgWeights.py","file_name":"getMCbgWeights.py","file_ext":"py","file_size_in_byte":7751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"475066875","text":"import rospy\nfrom std_msgs.msg import String\nfrom mav_msgs.msg import CommandTrajectory\nfrom geometry_msgs.msg import PoseStamped\nimport time\nimport numpy as np \nimport math\n\n\n\nwaypoints = None \ni = 0\n\ndef callback(data):\n\tglobal i \n\tglobal waypoints\n\tyaw = 0.0\n\tq0 = data.pose.orientation.w\n\tq1 = data.pose.orientation.x\n\tq2 = data.pose.orientation.y\n\tq3 = data.pose.orientation.z\n\tyaw = math.atan2(2.0 * (q3 * q0 + q1 * q2) , - 1.0 + 2.0 * (q0 * q0 + q1 * q1))\n\tX_cur = np.array([data.pose.position.x, data.pose.position.y, data.pose.position.z, yaw])\n\ttry:\n\t\tX_des = waypoints[i]\n\texcept:\n\t\tprint(\"Program ended. The quad reached its path.\")\n\t\treturn 0\n\terror = np.sum(np.abs(X_des - X_cur))\n\tif error < 0.5:\n\t\tprint(\"Waypoint: {}/{}\".format(i,waypoints.shape[0]))\n\t\ti = i + 1\n\t\tprint(waypoints[i])\n\n\nif __name__ == '__main__':\n\tglobal i \n\tglobal waypoints\n\twaypoints = np.load('path.npy')\n\tpub_init = rospy.Publisher('/quad/command/trajectory', CommandTrajectory, queue_size=10)\n\tpub_contr = rospy.Publisher('/cmd_3dnav', CommandTrajectory, queue_size=10)\n\trospy.Subscriber(\"/quad/ground_truth/pose\", PoseStamped, callback)\n\trospy.init_node('quad_planner', anonymous=True)\n\trate = rospy.Rate(10) # 10hz\n\n\tnext_value = False\n\tx = y = z = t =0.0\n\tt = time.time()\n\n\twhile not rospy.is_shutdown():\n\t\ttry:\n\t\t\t[x,y,z,t] = waypoints[i]\n\t\texcept:\n\t\t\tprint(\"Program Ended.\")\n\t\t\tbreak\n\t\t\t\n\n\t\tinit_msg = CommandTrajectory()\n\t\tinit_msg.snap.x = 0.0\n\t\tinit_msg.snap.y = 0.0\n\t\tinit_msg.snap.z = 1.0\n\t\t\n\n\t\tpub_init.publish(init_msg)\n\n\t\tcontr_msg = CommandTrajectory()\n\t\tcontr_msg.position.x = x\n\t\tcontr_msg.position.y = y\n\t\tcontr_msg.position.z = z\n\t\tcontr_msg.yaw = t\n\t\tpub_contr.publish(contr_msg)\n\n\t\trate.sleep()","sub_path":"src/scripts/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"7573988","text":"name = input(\"PLEASE STATE YOUR NAME. \")\nprint(\"nice name. I like it. \")\nwhere=input(\"PLEASE STATE WHERE YOU ARE FROM. \")\nprint(\"I have been to\", where,\"many times. I hope to go again soon.\")\nfavnum = int(input(\"AS SIMPLY AS POSSIBLE, TELL ME YOUR FAVORITE NUMBER. \"))\n\nfavcar = input(\"WHAT IS YOUR DREAM CAR MODEL, BRAND, AND YEAR? \")\n\ncarabs = float(input(\"WHAT IS THE TOTAL PRICE OF THIS CAR? \"))\n\ncarmon = float(input(\"WHAT IS THE MONTHLY INTEREST OF THIS CAR? 
\"))\n\nhowmany = float(input(\"HOW MANY PAYMENTS DO YOU NEED TO MAKE?\"))\n\nmpymt = (carmon * carabs) / (1 - (carmon + 1)**-howmany)\n\nprint(\"THE TOTAL COST OF\",favcar,\"WILL BE\",mpymt,)\n\nprint(\"GOOD LUCK\")","sub_path":"assignment_two.py","file_name":"assignment_two.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"52780506","text":"from contact import contact\r\n\r\nclass dema (object):\r\n    \"Description of class\"\r\n    axiagrammatosimou = 0.0\r\n    apostoleas = contact()\r\n    paraliptis = contact()\r\n    megethos = ''\r\n    periexomeno = ''\r\n    varos = 0.0\r\n\r\n    def __init__(self,a,ap,pa,m,p,v):\r\n        self.axiagrammatosimou = a\r\n        self.apostoleas = ap\r\n        self.paraliptis = pa\r\n        self.megethos = m\r\n        self.periexomeno = p\r\n        self.varos = v\r\n\r\n\r\n","sub_path":"repos/Pythonapplication5/Pythonapplication5/dema.py","file_name":"dema.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"101061181","text":"import random\ndef email_address (email, domain):\n    if email.count (\"@\") != 1:\n        return False\n    if email[0] == \"@\":\n        return False\n    if email [(email.find (\"@\") +1) :] != domain:\n        return False\n    return True\ndef name_part (email):\n    return email [:(email.find (\"@\"))]\ndef random_string(a):\n    return random.choice(a)\ndef check(question):\n    if question.lower().capitalize() == \"Goodbye\":\n        return True\ndef random_string_1(b):\n    return random.choice(b)\ndef str_in_str(word, question):\n    return word.lower() in question.lower()\ndef random_shutdown():\n    num=random.randint (1,100)\n    if num < 15:\n        return True\n\nemail = input(str(\"type a email adress \"))\ndomain = \"pop.ac.uk\"\nif email_address(email, domain) is True:\n    print (\"Welcome \" +(email [:-10]))\n    a = [\"joe\", \"Gary\", \"Steven\", \"Tony\", \"Adam\", \"Linda\"]\n    print(\"Hello, my name is \" + random_string(a) + \". i would be helping you today\")\n    count = 1\n    while True:\n        question = str(input(\"hello \" + name_part(email) + \",\" \" how can i help you? \"))\n        if random_shutdown():\n            print(\"session ended\")\n            break\n        if \"goodbye\" in question.lower():\n            print(\"exit\")\n            break\n        elif \"library\" in question.lower():\n            print(\"Sorry the library is closed today\")\n\n        elif \"i need coffee\" in question.lower():\n            print(\"yes you doo\")\n\n        elif \"wifi\" in question.lower():\n            print(\"wifi connection is great across campus\")\n\n        elif \"deadline\" in question.lower():\n            print(\"Sorry, your deadline has been extended 2 working days\")\n\n\n        elif question:\n            b = [\"Yes\", \"No\", \"Sounds interesting, tell me more\", \"haha! wow that was sick! 
tell me more\", \"maybe\"]\n            print(\"\" + random_string_1(b) + \"\")\n            count += 1\n\n        else:\n            print(\"Error\")\n\n\n\n\n\n\n\n","sub_path":"just random scripts/lowerctest.py","file_name":"lowerctest.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"203658358","text":"from airflow.operators.bash_operator import BashOperator\nfrom airflow import DAG\n\nfrom airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator\nfrom airflow.utils.dates import days_ago\n\nargs = {\n    \"project_id\": \"ppl-0924142533\",\n}\n\ndag = DAG(\n    \"ppl-0924142533\",\n    default_args=args,\n    schedule_interval=\"@once\",\n    start_date=days_ago(1),\n    description=\"Created with Elyra 3.0.1 pipeline editor using `ppl.pipeline`.\",\n    is_paused_upon_creation=False,\n)\n\n\nop_842c49e3_a2e0_4080_9ffa_845b3c04d5a1 = BashOperator(\n    namespace=\"default\",\n    task_id=\"test0\",\n    xcom_push=False,\n    env=\"\",\n    output_encoding=\"utf-8\",\n    bash_command=\"echo `hostname`\",\n    inputs=[],\n    outputs=[],\n    in_cluster=True,\n    config_file=\"None\",\n    dag=dag,\n)\n","sub_path":"ppl-0924142533.py","file_name":"ppl-0924142533.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"36888552","text":"lang = [\"un\", \"deux\", \"trois\"]\nl = [0, 1, 2, 3]\nl = range(1, 10, 2)\n\n# sorting an array\nlanguages = [\"js\", \"python\", \"c\", \"ada\", \"perl\"]\nlanguages.sort()\nprint(languages)\n\nlanguages.reverse()\nprint(languages)\n\nprint(languages[2:3])\n\nprint(type(lang))\nfor i in l:\n    print(\"%d\" % (i))\nprint(\"\\n\")\nprint(len(lang))\n\n\ndef myFilter(fct, lst):\n    result = []\n    for elt in lst:\n        if fct(elt):\n            result.append(elt)\n    return result\n\n\ndef test(elt):\n    return elt < 10\n\n\ncollection = [11, 12, 13, 9]\n\nprint(myFilter(test, collection))\n\n\ndef mymap(fct, lst):\n    result = []\n    for elt in lst:\n        result.append(fct(elt))\n    return result\n\n\ndef cube(val):\n    return val * val * val\n\n\n# print(mymap(cube, collection))\n\n#tuple (immutable but still filterable; the difference with lists/collections)#\n\"\"\"\n    tuple syntax uses () and list syntax uses []\n\"\"\"\nlst = [cube(elt) for elt in collection]\nlst2 = [test(elt) for elt in collection]\nlst3 = [elt.upper() for elt in languages if len(elt) > 4]\n\n# conversion between tuple and list types\ntupleToLst = list(lst3)\n#t2 = tuple(tupleToLst)\n#lst4 = [elt.lower() for elt in tupleToLst if len(elt) > 4]\ntupleToLst.append('javascript')\n\nprint(lst)\nprint(lst2)\nprint(tupleToLst)\nprint(lst3)\n\n\ndef counter(x):\n    if x > 2:\n        return x\n    else:\n        return False\n\n\nlst5 = (1, 2, 3, 4, 5, 6)\n\nprint([counter(elt) for elt in lst5])\n","sub_path":"list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"304216123","text":"#!/usr/bin/env python\n\n\"\"\"\nmetrics_msitools.py - Make a frequency table of #supporting reads per locus.\nCompute some other interesting stats as well. 
Stats in brackets would be\ninteresting but require joining the original repeat database.\n * how many sites have evidence for multiple alleles\n * mapQ histogram\n * strand distribution\n * chrom distribution\n * STR len (in bp) distribution\n * difference between observed STR len and ref STR len\n not absolute value: negative values show obs < ref len\n * STR unit distribution: mono, di, tri, tetra\n * STR unit multiple distribution: (end-start) / unit\n * STR genomic location distribution: intergenic, intronic, exonic\n\nOptions for filtering reads:\n * haploid options: --only-x, --only-y, only use X or Y alignments, depending\n on option (both cannot be specified together). For male samples, these\n should be haploid and thus deviations from expectation should reflect\n experimental errors.\n * maximum mapQ option: --mapq60, only use mapq60 (very high confidence)\n alignments\n * minimum repeat unit option: --min-unit (recommended=3, default=1?). Only\n consider loci where the repeat unit (end-start+1) / unit size is greater\n than the specified value. This could be useful to remove questionable\n loci, like 2~3 repeat units of tri or tetranucleotide repeats.\n\"\"\"\n\nimport sys\nfrom argparse import ArgumentParser\nfrom strlocusiterator import STRLocusIterator\n\nparser = ArgumentParser()\nSTRLocusIterator.add_parser_args(parser)\nargs = parser.parse_args()\n\nlocus_f = STRLocusIterator(**vars(args))\nfor (chrom, start, end, unit, region, reads) in locus_f:\n # Don't do anything, just accumulate metrics\n continue\n\nfor (description, value) in locus_f.filter_metrics():\n print(\"%s\\t%d\" % (description, value))\n\nfor (description, hist) in locus_f.hist_metrics():\n print(description)\n for k in sorted(hist.keys()):\n print(\"%s\\t%d\" % (k, hist[k]))\n","sub_path":"candidates/metrics_msitools.py","file_name":"metrics_msitools.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"46104322","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Client',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Project',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('title', models.CharField(null=True, max_length=200)),\n ('pub_date', models.DateTimeField(verbose_name='date published', null=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Signup',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('company_type_selected', models.CharField(max_length=200, default='None', choices=[('ApS', 'aps'), ('I/S', 'is'), ('A/S', 'as'), ('K/S', 'ks'), ('Amba', 'amba'), ('IVS', 'IVS'), ('Enkeltmandsvirksomhed', 'Enkeltmandsvirksomhed')])),\n ('company_name', models.CharField(max_length=100)),\n ('cvr', models.CharField(unique=True, max_length=10)),\n ('address', models.CharField(max_length=100)),\n ('zip_code', models.CharField(max_length=5)),\n ('city', models.CharField(max_length=40)),\n ('first_name', models.CharField(max_length=60)),\n ('last_name', models.CharField(max_length=60)),\n ('phone', 
models.CharField(max_length=12)),\n ('email', models.EmailField(unique=True, max_length=100)),\n ('created_date', models.DateField(auto_now_add=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='client',\n name='client',\n field=models.ForeignKey(to='customers.Signup'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='client',\n name='project',\n field=models.ManyToManyField(null=True, blank=True, to='customers.Project'),\n preserve_default=True,\n ),\n ]\n","sub_path":"customers/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"305596415","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport os\nimport sys\nfrom typing import List, Tuple\n\nfrom tokens import *\n\n\ndef modify(line: List[Tuple[int, str]]):\n for i, chunk in enumerate(line):\n\n if chunk[1] in (START, STOP):\n line[i] = (i, \"O\")\n continue\n\n if \"-\" in chunk[1]:\n body, tail = chunk[1].split(\"-\")\n\n if tail == \"I\":\n if i <= 0 or not \"-\" in line[i - 1][1]:\n line[i] = (i, \"O\")\n continue\n\n _body, _tail = line[i - 1][1].split(\"-\")\n\n if body != _body:\n line[i] = (i, \"O\")\n\n return line\n\n\ndef compress(lines: List[List[str]]):\n ret = []\n\n for line in lines:\n \n line = [ (i, tag) for i, tag in enumerate(line) ]\n\n line = modify(line)\n\n ret_t = []\n for i, t_i in line:\n if t_i == \"O\":\n ret_t += [(i, i, t_i)]\n\n elif t_i[-2:] == \"-B\":\n\n j = i\n for k, t_k in line[i + 1:]:\n if t_k[-2:] != \"-I\":\n break\n else:\n j = k\n\n ret_t += [(i, j, t_i[:-2])]\n\n ret += [ret_t]\n\n return ret\n\n\ndef accuracy(\n ref: List[List[str]],\n hyp: List[List[str]],\n delimiter: str=\"/\",\n quiet: bool=False,\n):\n ref = compress(ref)\n hyp = compress(hyp) \n\n tp = tn = fp = fn = 0\n eps = 1e-9\n\n for line_r, line_h in zip(ref, hyp):\n for h in line_h:\n if h[-1] == \"O\":\n if h in line_r:\n tn += 1\n else:\n fn += 1\n else:\n if h in line_r:\n tp += 1\n else:\n fp += 1\n\n precision = tp / (tp + fp + eps)\n recall = tp / (tp + fn + eps)\n f1_score = 2 * precision * recall / ( precision + recall + eps)\n\n if not quiet:\n print(\"precision: \", precision)\n print(\"recall : \", recall)\n print(\"f1_score : \", f1_score)\n\n return precision, recall, f1_score\n \n\ndef main():\n\n if len(sys.argv) < 3:\n raise Exception(\"Usage: python accuracy.py reference hypothesis delimiter\")\n\n for i in (1, 2):\n if os.path.exists(sys.argv[i]) is False:\n raise Exception(\"{} does not exist\".format(sys.argv[i]))\n\n if len(sys.argv) < 4:\n delimiter = \"/\"\n else:\n delimiter = sys.argv[3]\n\n ref = compress(\n [\n [x.rstrip().split(delimiter)[-1] for x in line.split()]\n for line in open(sys.argv[1]).readlines()\n ]\n )\n\n hyp = compress( \n [\n line.split()\n for line in open(sys.argv[2]).readlines()\n ]\n )\n\n accuracy(ref, hyp, delimiter)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"209385209","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCrawler for novels from [LNMTL](https://lnmtl.com).\n\"\"\"\nimport re\nimport sys\nimport json\nimport requests\nfrom os import path\n# from shutil import rmtree\nimport concurrent.futures\nfrom bs4 import 
BeautifulSoup\nfrom .helper import save_chapter\nfrom .binding import novel_to_epub, novel_to_mobi\n\nclass LNMTLCrawler:\n '''Crawler for LNMTL'''\n\n executor = concurrent.futures.ThreadPoolExecutor(max_workers=3)\n\n def __init__(self, novel_id, start_chapter=None, end_chapter=None, volume=False):\n if not novel_id:\n raise Exception('Novel ID is required')\n # end if\n\n self.chapters = []\n self.novel_id = novel_id\n self.start_chapter = start_chapter\n self.end_chapter = end_chapter\n self.output_path = None\n self.pack_by_volume = volume\n\n self.home_url = 'https://lnmtl.com'\n self.login_url = 'https://lnmtl.com/auth/login'\n self.logout_url = 'https://lnmtl.com/auth/logout'\n self.email = 'dipu@algomatrix.co'\n self.password = 'twill1123'\n\n self.headers = {\n 'accept': 'text/html,application/xhtml+xml,application/xml',\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n requests.urllib3.disable_warnings()\n # end def\n\n def start(self):\n '''start crawling'''\n # if path.exists(self.output_path):\n # rmtree(self.output_path)\n try:\n if self.start_chapter:\n if not self.login():\n print('Failed to login')\n else:\n print('Logged in.')\n # end if\n self.get_chapter_list()\n self.get_chapter_bodies()\n self.logout()\n # end if\n finally:\n self.output_path = self.output_path or self.novel_id\n novel_to_epub(self.output_path, self.pack_by_volume)\n novel_to_mobi(self.output_path)\n # end try\n # end def\n\n def login(self):\n '''login to LNMTL'''\n print('Visiting', self.login_url)\n response = requests.get(self.login_url, headers=self.headers, verify=False)\n self.headers['cookie'] = '; '.join([x.name + '=' + x.value for x in response.cookies])\n soup = BeautifulSoup(response.text, 'lxml')\n headers = self.headers.copy()\n headers['content-type'] = 'application/x-www-form-urlencoded'\n body = {\n '_token': soup.select_one('form input[name=\"_token\"]')['value'],\n 'email': self.email,\n 'password': self.password\n }\n print('Attempt login...')\n response = requests.post(self.login_url, data=body, headers=headers, verify=False)\n self.headers['cookie'] = '; '.join([x.name + '=' + x.value for x in response.cookies])\n soup = BeautifulSoup(response.text, 'lxml')\n logout = soup.select_one('a[href=\"%s\"]' % self.logout_url)\n if logout is None:\n print('-' * 80)\n body = soup.select_one('body').text\n print('\\n\\n'.join([x for x in body.split('\\n\\n') if len(x.strip())]))\n print('-' * 80)\n return False\n # end if\n return True\n # end def\n\n def logout(self):\n '''logout as a good citizen'''\n print('Attempt logout...')\n response = requests.get(self.logout_url, headers=self.headers)\n soup = BeautifulSoup(response.text, 'lxml')\n logout = soup.select_one('a[href=\"%s\"]' % self.logout_url)\n if logout is None:\n print('Logged out.')\n else:\n print('Failed to logout.')\n # end if\n # end def\n\n def get_chapter_list(self):\n '''get list of chapters'''\n url = '%s/novel/%s' % (self.home_url, self.novel_id)\n print('Visiting', url)\n response = requests.get(url, headers=self.headers, verify=False)\n soup = BeautifulSoup(response.text, 'lxml')\n self.novel_author = 'N/A'\n try:\n self.novel_name = soup.select_one('.novel .media .novel-name').text\n self.novel_name = self.novel_name.rsplit(' ', 1)[0]\n self.novel_cover = soup.find('img', {\"title\" : self.novel_name})['src']\n except:\n self.novel_cover = None\n # end try\n self.output_path = re.sub('[\\\\\\\\/*?:\"<>|]' or r'[\\\\/*?:\"<>|]', '', 
self.novel_name or self.novel_id)\n for script in soup.find_all('script'):\n text = script.text.strip()\n if not text.startswith('window.lnmtl'):\n continue\n # end if\n i,j = text.find('lnmtl.volumes = '),text.find(';lnmtl.route')\n if i <= 0 and j <= i:\n continue\n # end if\n i += len('lnmtl.volumes =')\n self.volumes = json.loads(text[i:j].strip())\n # end for\n print(len(self.volumes), 'volumes found. Getting chapters...')\n\n self.chapters = []\n future_to_url = {}\n page_url = '%s/chapter?page=1' % (self.home_url)\n for vol in self.volumes:\n task = self.executor.submit(self.get_chapters_by_volume, vol['id'], page_url)\n future_to_url[task] = vol['id']\n # end for\n for future in concurrent.futures.as_completed(future_to_url):\n concurrent.futures.wait(future.result())\n # end for\n\n self.chapters = sorted(self.chapters, key=lambda x: int(x['position']))\n print('> [%s]' % self.novel_name, len(self.chapters), 'chapters found')\n # end def\n\n def get_chapters_by_volume(self, vol_id, page_url):\n url = '%s&volumeId=%s' % (page_url, vol_id)\n print('Visiting', url)\n response = requests.get(url, headers=self.headers, verify=False)\n result = response.json()\n page_url = result['next_page_url']\n for chapter in result['data']:\n self.chapters.append(chapter)\n # end for\n future_to_url = {}\n if result['current_page'] == 1:\n for page in range(1, result['last_page']):\n page_url = '%s/chapter?page=%s' % (self.home_url, page + 1)\n task = self.executor.submit(self.get_chapters_by_volume, vol_id, page_url)\n future_to_url[task] = '%s-%s' % (vol_id, page)\n # end for\n # end if\n return future_to_url\n # end def\n\n def get_chapter_index(self, chapter):\n if chapter is None: return\n for i, chap in enumerate(self.chapters):\n if chap['site_url'] == chapter or chap['number'] == chapter:\n return i\n # end if\n # end for\n raise Exception('Invalid chapter url')\n # end def\n\n def get_chapter_bodies(self):\n '''get content from all chapters till the end'''\n self.start_chapter = self.get_chapter_index(self.start_chapter)\n self.end_chapter = self.get_chapter_index(self.end_chapter) or len(self.chapters) - 1\n if self.start_chapter is None: return\n start = self.start_chapter - 1\n end = min(self.end_chapter + 1, len(self.chapters))\n future_to_url = {self.executor.submit(self.parse_chapter, index):\\\n index for index in range(start, end)}\n # wait till finish\n [x.result() for x in concurrent.futures.as_completed(future_to_url)]\n print('Finished crawling.')\n # end def\n\n def get_volume(self, vol_id, chapter_no):\n for index, vol in enumerate(self.volumes):\n if vol['id'] == vol_id:\n return vol['name'] if 'name' in vol else str(index + 1)\n # end if\n # end for\n return str(int(chapter_no) // 100)\n # end def\n\n def parse_chapter(self, index):\n '''Parse the content of the chapter page'''\n url = self.chapters[index]['site_url']\n print('Crawling', url)\n response = requests.get(url, headers=self.headers, verify=False)\n soup = BeautifulSoup(response.text, 'lxml')\n logout = soup.select_one('a[href=\"%s\"]' % self.logout_url)\n if logout is None:\n print('WARNING: not logged in')\n # end if\n volume_no = self.chapters[index]['volume_id']\n chapter_no = self.chapters[index]['position']\n\n chapter_title = self.chapters[index]['title']\n chapter_title = '#%s %s' % (chapter_no, chapter_title)\n body = soup.select('.chapter-body .translated')\n body = [self.format_text(x.text) for x in body if x]\n body = '\\n'.join(['
<p>%s</p>
' % (x) for x in body if len(x)])\n # save data\n save_chapter({\n 'url': url,\n 'novel': self.novel_name,\n 'cover':self.novel_cover,\n 'author': self.novel_author,\n 'volume_no': str(volume_no),\n 'chapter_no': chapter_no,\n 'chapter_title': chapter_title,\n 'body': '
<h1>%s</h1>
%s' % (chapter_title, body)\n }, self.output_path, self.pack_by_volume)\n # end def\n\n def format_text(self, text):\n '''formats the text and remove bad characters'''\n text = text.replace(u'\\u00ad', '')\n text = re.sub(r'\\u201e[, ]*', '“', text)\n text = re.sub(r'\\u201d[, ]*', '”', text)\n text = re.sub(r'[ ]*,[ ]+', ', ', text)\n return text.strip()\n # end def\n# end class\n\n\nif __name__ == '__main__':\n LNMTLCrawler(\n novel_id=sys.argv[1],\n start_chapter=sys.argv[2] if len(sys.argv) > 2 else '',\n end_chapter=sys.argv[3] if len(sys.argv) > 3 else '',\n volume=sys.argv[4].lower() == 'true' if len(sys.argv) > 4 else ''\n ).start()\n# end if\n","sub_path":"ebook_crawler/lnmtl.py","file_name":"lnmtl.py","file_ext":"py","file_size_in_byte":9686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"102458182","text":"#shortest path around\nimport sys\ninput = sys.stdin.readline\nfrom queue import *\nfrom pprint import *\nfor _ in range(5):\n #make graph\n graph = [[0]*8 for i in range(8)]\n ends = []\n\n for x in range(8):\n line = input()\n for y in range(8):\n if line[y] == \".\" or line[y] == \"A\" or line[y] == \"B\":\n row = []\n for q in (-1,0,1):\n xx = x+q\n for w in (-1,0,1):\n yy = y+w\n if 0<=xx<8 and 0<=yy<8:\n row.append((xx,yy))\n graph[x][y] = row\n \n if line[y] == \"A\" or line[y] == \"B\":\n ends.append((x,y))\n elif line[y] == \"#\":\n graph[x][y] = []\n \n #model shortest path method\n\n (startx,starty) = ends[0]\n (endx,endy) = ends[1]\n dist = [[9999]*8 for i in range(10)]\n dist[startx][starty] = 0\n nextup = Queue()\n nextup.put((startx,starty))\n\n while not nextup.empty():\n cur = nextup.get()\n \n cx,cy = cur\n\n for nx,ny in graph[cx][cy]:\n if dist[cx][cy] + 1 < dist[nx][ny]:\n dist[nx][ny] = dist[cx][cy] + 1\n nextup.put((nx,ny))\n \n print(dist[endx][endy])\n","sub_path":"DWITE/DWITE '08 R4 #4 - Shortest path around v2.py","file_name":"DWITE '08 R4 #4 - Shortest path around v2.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"566694965","text":"import networkx as nx\nimport pandas as pd\nimport numpy as np\n\nfl = pd.read_table('temp.txt', header=None, sep='\\t')\nnum = len(fl)\n\narr = np.zeros((num, num))\n\nfor i in range(num):\n for j in range(num):\n arr[i][j] = fl[i][j]\n\nFG = nx.from_numpy_matrix(arr)\ndegree = FG.degree(weight='weight')\ndegree_centrality = nx.degree_centrality(FG)\ncloseness_centrality = nx.closeness_centrality(FG)\nbetweenness_centrality = nx.betweenness_centrality(FG, weight='weight')\neigenvector_centrality = nx.eigenvector_centrality(FG, nstart=None, weight='weight')\n\nfor qwe in range(num):\n print(degree[qwe],degree_centrality[qwe],closeness_centrality[qwe],betweenness_centrality[qwe],eigenvector_centrality[qwe])","sub_path":"algorithm/centrality/centrality.py","file_name":"centrality.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"163349004","text":"import random\nimport sys\n\nt = int(input())\nfor i in range(t):\n a = int(input())\n\n X = (a // 3) + 1\n\n A = {(x + 1, y + 1) for x in range(X) for y in [0, 1, 2]}\n\n while A:\n x, y = random.sample(A, 1)[0]\n x = min(X - 1, max(2, x))\n print(x, 2)\n sys.stdout.flush()\n x_, y_ = [int(s) for s in input().split(\" \")]\n if (x_, y_) == (0, 0):\n # done\n # print(\"COMPLETED with A = {0}\".format(A), file=sys.stderr)\n 
break\n elif (x_, y_) == (-1, -1):\n # error\n # print(\"Error with output {0},{1} and input {2},{3}\".format(x, y, x_, y_), file=sys.stderr)\n break\n elif (x_, y_) in A:\n # print(\"Removed {0},{1} from A, remaining items {2}\".format(x_, y_, len(A)), file=sys.stderr)\n A.remove((x_, y_))\n\n","sub_path":"src/2018/firsta/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"440125513","text":"from torchvision import models\r\nimport torch\r\nfrom torch import nn\r\nimport math\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\n\r\nstride_prod = 16\r\nroi_size = 7\r\nroi_pad = 0\r\n\r\nclass fast_rcnn_net(nn.Module):\r\n\r\n def __init__(self, output_size):\r\n super(fast_rcnn_net, self).__init__()\r\n\r\n # TODO: use vgg16 as ConvNet\r\n self.features = nn.Sequential(\r\n # 0-0 conv layer: 3 * 360 * 640 -> 64 * 360 * 640\r\n nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(),\r\n\r\n # 0-1 conv layer: 64 * 360 * 640 -> 64 * 360 * 640\r\n nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(),\r\n\r\n # 0 max pooling: 64 * 360 * 640 -> 64 * 180 * 320\r\n nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),\r\n\r\n # 1-0 conv layer: 64 * 180 * 320 -> 128 * 180 * 320\r\n nn.Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(),\r\n\r\n # 1-1 conv layer: 128 * 180 * 320 -> 128 * 180 * 320\r\n nn.Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(),\r\n\r\n # 1 max pooling: 128 * 180 * 320 -> 128 * 90 * 160\r\n nn.MaxPool2d(kernel_size=2, stride =2, padding=0, dilation=1, ceil_mode=False),\r\n\r\n # 2-0 conv layer: 128 * 90 * 160 -> 256 * 90 * 160\r\n nn.Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(),\r\n\r\n # 2-1 conv layer: 256 * 90 * 160 -> 256 * 90 * 160\r\n nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(),\r\n\r\n # 2-2 conv layer: 256 * 90 * 160 -> 256 * 90 * 160\r\n nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(),\r\n\r\n # 2 max pooling: 256 * 90 * 160 -> 256 * 45 * 80\r\n nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),\r\n\r\n # 3-0 conv layer: 256 * 45 * 80 -> 512 * 45 * 80\r\n nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU()\r\n\r\n )\r\n\r\n # according to the fast-rcnn paper, it is unnecessary to change the parameters of the first 8 conv layers, thus we freeze these conv layers so the parameters won't be updated during training\r\n for param in self.parameters():\r\n param.requires_grad = False # freeze these parameters\r\n \r\n # from here on, the parameters will be updated by back-propagation\r\n self.features_unfreeze = nn.Sequential(\r\n # 3-1 conv layer: 512 * 45 * 80 -> 512 * 45 * 80\r\n nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(),\r\n\r\n # 3-2 conv layer: 512 * 45 * 80 -> 512 * 45 * 80\r\n nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(),\r\n\r\n # 3 max pooling: 512 * 45 * 80 -> 512 * 22 * 40\r\n nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False),\r\n\r\n # 4-0 conv layer: 512 * 22 * 40 -> 512 * 22 * 40\r\n nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(),\r\n\r\n # 4-1 conv layer: 512 * 
22 * 40 -> 512 * 22 * 40\r\n nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU(),\r\n\r\n # 4-2 conv layer: 512 * 22 * 40 -> 512 * 22 * 40\r\n nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),\r\n nn.ReLU()\r\n )\r\n\r\n # TODO: the ROI Pooling layer\r\n # the last max pooling layer is replaced by a ROI pooling layer that is configured by setting H=W=roi_size: roi_size * roi_size * 512\r\n self.roi_pooling = nn.AdaptiveMaxPool2d((roi_size, roi_size), return_indices=False)\r\n\r\n # TODO: continue the vgg fully connected layer\r\n self.classifier = nn.Sequential(\r\n # 0 fully connected\r\n nn.Linear(in_features=roi_size*roi_size*512, out_features=4096, bias=True),\r\n nn.ReLU(),\r\n nn.Dropout(p=0.5),\r\n\r\n # 1 fully connected\r\n nn.Linear(in_features=4096, out_features=4096, bias=True),\r\n nn.ReLU(),\r\n nn.Dropout(p=0.5)\r\n )\r\n\r\n # TODO: two sibling output layer: one that produces softmax probability estimates, another outputs four real-valued numbers for each of the object class\r\n self.class_score_layer = nn.Linear(in_features=4096, out_features=output_size, bias=False)\r\n\r\n self.bbox_target_layer = nn.Linear(in_features=4096, out_features=(output_size-1)*4, bias=False)\r\n\r\n ## bbox regressor uses the parameterization for regression targets given in paper \"Rich feature hierarchies for accurate object detection and semantic segmentation\"\r\n def bbox_target_to_pred_bbox(self, region_proj, bbox_target):\r\n box = torch.Tensor(region_proj)\r\n\r\n r, c, w, h = box[0], box[1], box[2], box[3]\r\n\r\n dr = bbox_target[0::4]\r\n dc = bbox_target[1::4]\r\n dw = bbox_target[2::4]\r\n dh = bbox_target[3::4]\r\n\r\n pred_bbox = torch.zeros(bbox_target.size(), dtype=bbox_target.dtype)\r\n\r\n pred_bbox[0::4] = w * dr + r\r\n pred_bbox[1::4] = h * dc + c\r\n pred_bbox[2::4] = w * torch.Tensor(np.exp(dw.detach()))\r\n pred_bbox[3::4] = h * torch.Tensor(np.exp(dh.detach()))\r\n\r\n for i in range(len(pred_bbox.detach())):\r\n if i % 4 == 0 or i % 4 == 1:\r\n pred_bbox[i] = math.ceil(pred_bbox[i] * stride_prod) - 1\r\n if i % 4 == 2 or i % 4 == 3:\r\n pred_bbox[i] = math.floor(pred_bbox[i] * stride_prod) + 1\r\n\r\n return pred_bbox\r\n\r\n def forward_feature (self, x):\r\n feature_maps = self.features(x)\r\n #feature_maps = self.features_unfreeze(feature_maps)\r\n return feature_maps\r\n\r\n def forward_output (self, x, region_projs):\r\n size = x.detach().size()\r\n output = torch.Tensor(size[0], size[1], roi_size, roi_size)\r\n for idx in range(size[0]):\r\n (r, c, w, h) = (int(z) for z in region_projs[idx])\r\n output[idx] = self.roi_pooling(F.pad(x[idx, :, c: c+h, r: r+w], (roi_pad, roi_pad, roi_pad, roi_pad)))\r\n output = self.classifier(output.view(size[0], -1))\r\n clf_scores = self.class_score_layer(output)\r\n clf_scores = F.softmax(clf_scores, dim=1)\r\n bbox_targets = self.bbox_target_layer(output)\r\n bbox_pred = torch.Tensor(bbox_targets.detach().size())\r\n for idx in range(len(region_projs)):\r\n bbox_pred[idx] = self.bbox_target_to_pred_bbox(region_projs[idx], bbox_targets[idx])\r\n return clf_scores, bbox_pred\r\n return clf_scores, bbox_targets\r\n\r\n\r\ndef map_region_proposals_to_feature_map (rps):\r\n rp_projs = []\r\n for rp in rps:\r\n (r1, c1, w, h) = rp # (r1, c1) is the top-left corner of the region proposal, w is width, h is height\r\n r2, c2 = r1 + w - 1, c1 + h - 1 # (r2, c2) is the bottom-right corner\r\n\r\n r1_ = min(math.floor(r1 / stride_prod) + 1, 38) # max index is 39, but we have to 
guarantee that the projection has at least width 1\r\n c1_ = min(math.floor(c1 / stride_prod) + 1, 20) # max index is 21, ...\r\n r2_ = math.ceil(r2 / stride_prod) - 1\r\n c2_ = math.ceil(c2 / stride_prod) - 1\r\n w = max(1.0, r2_-r1_+1)\r\n h = max(1.0, c2_-c1_+1)\r\n rp_projs.append((r1_, c1_, w, h))\r\n return rp_projs\r\n\r\n\r\ndef smooth_multi_task_loss (clf_scores, clf_gtruth, bbox_pred, bbox_gtruth, bbox_label, lambda_):\r\n loss = torch.zeros(len(clf_gtruth))\r\n #criterion = nn.CrossEntropyLoss()\r\n for idx in range(len(clf_gtruth)):\r\n loss_cls = torch.Tensor([- math.log(max(clf_scores[idx][int(clf_gtruth[idx].item())], 1e-45))]).squeeze(0)\r\n loss_cls.requires_grad_()\r\n u = int(bbox_label[idx].item())\r\n if u > 0:\r\n loss_bbox = F.smooth_l1_loss(bbox_pred[idx][(u - 1)*4: u*4], bbox_gtruth[idx].type(torch.float), reduction=\"sum\")\r\n else:\r\n loss_bbox = 0\r\n\r\n loss[idx] = loss_cls + lambda_ * loss_bbox\r\n\r\n return loss.mean(dim=0)","sub_path":"fast_RCNN_version/fast_rcnn_network.py","file_name":"fast_rcnn_network.py","file_ext":"py","file_size_in_byte":8368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"433818116","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, absolute_import, division\n__docformat__ = \"restructuredtext en\"\n\n# disable: accessing protected members, too many methods\n# pylint: disable=W0212,R0904\n\nfrom zope.component.hooks import setHooks\n\nfrom nti.testing.layers import GCLayerMixin\nfrom nti.testing.layers import ZopeComponentLayer\nfrom nti.testing.layers import ConfiguringLayerMixin\n\nimport zope.testing.cleanup\n\n\nclass SharedConfiguringTestLayer(ZopeComponentLayer,\n GCLayerMixin,\n ConfiguringLayerMixin):\n\n set_up_packages = ('nti.zodb',)\n\n @classmethod\n def setUp(cls):\n setHooks()\n cls.setUpPackages()\n\n @classmethod\n def tearDown(cls):\n cls.tearDownPackages()\n zope.testing.cleanup.cleanUp()\n\n @classmethod\n def testSetUp(cls, test=None):\n setHooks()\n\n @classmethod\n def testTearDown(cls):\n pass\n","sub_path":"src/nti/zodb/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"124164990","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# Created at 9/5/18 5:44 PM\n# @Author : allen\n\nimport argparse\nimport collections\nimport csv\nfrom datetime import datetime\nimport json\nimport os\nimport sys\nimport wave\n\nimport ffmpy3\nfrom minio import Minio\nfrom minio.error import ResponseError, BucketAlreadyOwnedByYou, BucketAlreadyExists\nimport rethinkdb\n\n\n# rethinkdb.make_timezone(\"+08:00\")\n\"\"\" \nminioClient = Minio('10.161.32.67:9000',\n access_key='MTIWGRQUTV4P270FDO1U',\n secret_key='n/Fy0peAhmgt2lA5d0DOxtmviuZ498VHe4M28PlQ',\n secure=False)\n\"\"\"\n############################################################################\n## data resource and storage information\n\n# rethink db information\nfxn_meeting_db = {\n 'host': \"10.157.165.230\",\n 'db': \"FXN_Meeting_ASR\",\n 'table': \"meeting_asr\"\n}\n# corpus information\ncorpus_setting = {\n 'FXN_Meeting_asr': 'FXN-Meeting-asr'\n}\n\n# text_csv_resource_file\nraw_file_resource = {\n 'vctk_script': '/home/allen/speech_record/data/2018-09-04/Untitled31/transcript.csv',\n}\n\n#############################################################\n\"\"\"\n connect to RethinkDB: F\n\n\"\"\"\n\n\nclass 
Rethinkdb_pool(object):\n\n    def __init__(self, args):\n        \"\"\"initialize the FXN_Meeting_ASR DB\n        \"\"\"\n        self.host = args.db_host\n        self.db = args.db\n        self.item_table = args.table\n        self.conn = rethinkdb.connect(self.host, db=self.db).repl()\n        self.create_recording_table()\n\n    def create_recording_table(self, ):\n        table_list = rethinkdb.db(self.db).table_list().run(self.conn)\n        if not self.item_table in table_list:\n            rethinkdb.db(self.db).table_create(self.item_table).run(self.conn)\n\n    def insert_table(self, data):\n        \"\"\"\n        :param data: dict data\n        :return:\n        \"\"\"\n        rethinkdb.table(self.item_table).insert(data).run(self.conn)\n        # rethinkdb.table(self.item_table).update(data).run(self.conn)\n\n\n################################################################################################\n\"\"\"\n    Part 2: Minio cloud connection\n\n\"\"\"\n\n\nclass Minio_auido_pool(object):\n    \"\"\"Initialize minioClient with an endpoint and access/secret keys.\n    \"\"\"\n\n    def __init__(self, args):\n        self.host = args.minio_host_port\n        self.access_key = args.minio_access_key\n        self.secret_key = args.minio_secret_key\n        self.corpus = args.bucket\n        self.minio = Minio(self.host, self.access_key, self.secret_key, secure=False)\n        self.create_bucket()\n\n    def create_bucket(self, ):\n        \"\"\"\n        bucket is main folder name.\n        :return:\n        \"\"\"\n        corpus = self.modify_bucket_name_4_minio() # minio bucket name has special format\n\n        if self.minio.bucket_exists(corpus) == False:\n            try:\n                self.minio.make_bucket(corpus, location=\"us-east-1\")\n            except BucketAlreadyOwnedByYou as err:\n                pass\n            except BucketAlreadyExists as err:\n                pass\n            except ResponseError as err:\n                raise\n\n        return\n\n    def modify_bucket_name_4_minio(self, ):\n        \"\"\"\n        This should be fixed with a regular expression\n        :return:\n        \"\"\"\n        return self.corpus.replace('-', '').lower()\n\n\n    def upload_audio_under_folder(self, object_folder, audio_name, file_path):\n        # Put an object audio\n        \"\"\"\n        :param object_folder:\n        :param audio_name:\n        :param file_path: resource location(absolute path)\n        :return:\n        \"\"\"\n        bucket = self.modify_bucket_name_4_minio()\n        try:\n            object_name = os.path.join(object_folder, audio_name)\n            self.minio.fput_object(bucket, object_name, file_path)\n        except ResponseError as err:\n            print(err)\n\n\n################################################################################################\n\"\"\"\n    Part 3: text related reading by python\n\n\"\"\"\n\n\ndef get_meeting_info(file):\n\n    with open(file, 'r', encoding='utf-8') as f:\n        info = f.readline()\n    return json.loads(info)\n\n\ndef get_audio_txt(file):\n    \"\"\"\n    :param file: text file: just one line in the text file\n    :return: string\n    \"\"\"\n    with open(file, 'r', encoding='utf-8') as f:\n        lines = csv.reader(f)\n        header = next(lines)\n        for line in lines:\n            yield line\n\n################################################################################################\n\"\"\"\n    Part 4: read audios' characteristics\n\"\"\"\n\n\ndef wave_read(path):\n    wavfile = wave.open(path, \"rb\")\n    framerate = wavfile.getframerate()\n    nframes = wavfile.getnframes()\n    duration = round(nframes / float(framerate), 4)\n    return framerate, duration\n\ndef downsample(path):\n    pre_dir = os.path.dirname(path)\n    if not os.path.isdir(pre_dir + '/16k'):\n        os.mkdir(pre_dir + '/16k')\n    basename = os.path.basename(path)\n    new_path = os.path.join(pre_dir, '16k', basename)\n    ff = ffmpy3.FFmpeg(inputs={path: None}, outputs={new_path: '-ar 16k -n'})\n    try:\n        ff.run()\n    except AttributeError as err:\n        pass\n    return 
new_path\n\n################################################################################################\n\"\"\"\n Part 5: Process customer's data insert data into Rethinkdb and minio cloud\n\n\"\"\"\n\ndef create_per_dict(info, item, date, audio_name, folder, duration, framerate, corpus):\n id_script_dict = {}\n # audio_name = os.path.basename(item[0])\n vctkID = 'VCTK0000'\n confidence = item[2].replace(' ', '')\n likelihood = item[3].replace(' ', '')\n asr_text = item[4]\n dt_fmt = datetime.strptime(audio_name[:-11], '%Y-%m-%dT%H-%M-%S')\n timestamp = rethinkdb.iso8601(dt_fmt.isoformat() + '+08:00')\n location = '%s/%s/%s' % (corpus, folder, audio_name)\n\n id_script_dict.update({'audio_name': audio_name})\n id_script_dict.update({'audio_location': location})\n id_script_dict.update({\"audio_duration\": duration})\n id_script_dict.update({\"audio_rate\": framerate})\n id_script_dict.update({'audio_mean_volume': ''})\n id_script_dict.update({'audio_max_volume': ''})\n id_script_dict.update({'confidence': eval(confidence)})\n id_script_dict.update({'likelihood': eval(likelihood)})\n id_script_dict.update({'asr_text': asr_text})\n id_script_dict.update({'timestamp': timestamp})\n id_script_dict.update({'date': date})\n id_script_dict.update({'locale': info['locale'].replace('会议室', 'meeting room')})\n id_script_dict.update({'topic': info['topic']})\n id_script_dict.update({'attendees': info['attendees']})\n id_script_dict.update({'txt_segmentation': ''})\n id_script_dict.update({'language': 'mandarin'})\n id_script_dict.update({'language_mixed': ''})\n id_script_dict.update({'reviewed_txt': ''})\n id_script_dict.update({'audio_quality': 'IDK'})\n id_script_dict.update({'corpus': corpus})\n id_script_dict.update({'speakerID': vctkID})\n id_script_dict.update({'spk_birth': ''})\n id_script_dict.update({'audio_gender': ''})\n id_script_dict.update({'audio_accent': ''})\n id_script_dict.update({'audio_age': ''})\n\n return id_script_dict\n\ndef init_db_cloud(args):\n \"\"\"\n connect to rethinkdb: db: FXN_Meeting_ASR\n connect mino\n \"\"\"\n r = Rethinkdb_pool(args)\n minio = Minio_auido_pool(args)\n return r, minio\n\ndef main_update_audio_info(script_path, args):\n r, minio = init_db_cloud(args)\n bucket = minio.modify_bucket_name_4_minio()\n # main_script_dir = raw_file_resource.get('vctk_script')\n info = get_meeting_info(script_path)\n items = get_audio_txt(script_path)\n\n date = datetime.strptime(info['date'], '%a %b %d %H:%M:%S %Y')\n object_folder = '{}/{}'.format(date.strftime('%Y-%m'), date.strftime('%Y-%m-%d'))\n\n date_rfmt = rethinkdb.iso8601(date.isoformat() + '+08:00')\n\n pre_dir = info['fullpath']\n count = 0\n for item in items:\n ori_path = os.path.join(pre_dir, item[0])\n audio_path = downsample(ori_path)\n basename = os.path.basename(audio_path)\n framerate, duration = wave_read(audio_path)\n per_dict = create_per_dict(info, item, date_rfmt, basename, object_folder, duration, framerate, bucket)\n minio.upload_audio_under_folder(object_folder, basename, audio_path)\n r.insert_table(per_dict)\n count += 1\n print(count)\n\n\ndef get_path(d):\n script_list = []\n dirs = collections.deque()\n while 1:\n for s in os.listdir(d):\n path = os.path.join(d, s)\n if s == 'transcript.csv':\n script_list.append(path)\n elif os.path.isdir(path):\n dirs.append(path)\n try:\n d = dirs.popleft()\n except IndexError:\n break\n return script_list\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Command line for uploading audio and transcript.')\n 
parser.add_argument('-d', '--directory', dest=\"d\", default=None, help=\"Upload all files under the directory.\")\n parser.add_argument('-db', '--database', dest='db', default='FXN_Meeting_ASR', help=\"Database of Rethinkdb.\")\n parser.add_argument('-t', '--table', dest=\"table\", default='meeting_asr', help=\"Table of Rethinkdb.\")\n parser.add_argument('-b', '--bucket', dest=\"bucket\", default='FXN-Meeting-asr', help=\"Bucket of Minio.\")\n parser.add_argument('--db-host', default='10.157.165.230', help=\"Rethinkdb host.\")\n parser.add_argument('--minio-host-port', default='10.161.32.67:9000', help=\"Rethinkdb host.\")\n parser.add_argument('--minio-access-key', default='MTIWGRQUTV4P270FDO1U', help=\"Minio access key.\")\n parser.add_argument('--minio-secret-key', default='n/Fy0peAhmgt2lA5d0DOxtmviuZ498VHe4M28PlQ', help=\"Minio secret key.\")\n # parser.add_argument('script_file', help=\"Audio file to be sent to the server\", type=argparse.FileType('r'), default=sys.stdin)\n parser.add_argument('-s', '--script-file', dest=\"script\", help=\"a file contains transcript info.\")\n args = parser.parse_args()\n\n if args.d:\n script_list = get_path(args.d)\n else:\n script_list = [args.script]\n # script_dir = '/home/allen/speech_record/data/2018-09-04/Untitled41/transcript.csv'\n for script in script_list:\n main_update_audio_info(script, args)","sub_path":"release/upload_file-v0.0.1.py","file_name":"upload_file-v0.0.1.py","file_ext":"py","file_size_in_byte":10376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"592976287","text":"from CRABClient.UserUtilities import config, getUsernameFromSiteDB\nconfig = config()\n\nconfig.section_('General')\nconfig.General.requestName = 'TNP_Pythia8_JpsiMM_ptJpsi_03_06_Hydjet_MB_HINPbPbWinter16DR-75X_mcRun2_HeavyIon_v13_ext1-v1'\nconfig.General.workArea = 'crab_projects'\nconfig.General.transferOutputs = True\nconfig.General.transferLogs = False\n\nconfig.section_('JobType')\nconfig.JobType.pluginName = 'Analysis'\nconfig.JobType.psetName = 'tnp_PbPb_mc.py'\nconfig.JobType.maxMemoryMB = 2500\nconfig.JobType.outputFiles = ['tnpJPsi_MC_PbPb_AOD.root']\n\nconfig.section_('Data')\nconfig.Data.inputDataset = '/Pythia8_JpsiMM_ptJpsi_03_06_Hydjet_MB/HINPbPbWinter16DR-75X_mcRun2_HeavyIon_v13_ext1-v1/AODSIM'\nconfig.Data.inputDBS = 'global'\nconfig.Data.unitsPerJob = 1\nconfig.Data.splitting = 'FileBased'\nconfig.Data.outLFNDirBase = '/store/group/phys_heavyions/%s/TagAndProbe2015/%s' % (getUsernameFromSiteDB(), config.General.requestName)\nconfig.Data.publication = False\n\nconfig.section_('Site')\nconfig.Site.storageSite = 'T2_CH_CERN'\n","sub_path":"test/jpsiHI/crabConfig_tnp_PbPb_mc.py","file_name":"crabConfig_tnp_PbPb_mc.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"107814977","text":"from tqdm import tqdm\nimport os\nimport shutil\n\n\ndef move_subdataset_to_prep(target_dataset, target_subdataset):\n base_dataset_path = f\"{os.path.join(os.getcwd(), 'datasets', target_dataset)}\"\n\n subdataset_test = os.path.join(base_dataset_path, \"test\", target_subdataset)\n subdataset_train = os.path.join(base_dataset_path, \"train\", target_subdataset)\n\n if not os.path.isdir(subdataset_test):\n print(f\"{subdataset_test} is not a directory\")\n return\n\n if not os.path.isdir(subdataset_train):\n print(f\"{subdataset_train} is not a directory\")\n return\n\n destination = os.path.join(os.getcwd(), 
\"prep_tiles\", \"tiles\")\n\n all_files = [os.path.join(subdataset_test, file) for file in os.listdir(subdataset_test)]\n all_files.extend([os.path.join(subdataset_train, file) for file in os.listdir(subdataset_train)])\n move_all_files(all_files, destination)\n\ndef move_all_files(target_files, destination):\n for file in tqdm(target_files):\n shutil.move(file, destination)","sub_path":"move_data_to_prep.py","file_name":"move_data_to_prep.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"440231260","text":"import logging\n\nfrom flask import Response, request, current_app\nfrom connexion import problem\nfrom werkzeug.exceptions import Unauthorized\n\nfrom rhub.tower import model\nfrom rhub.tower.client import TowerError\nfrom rhub.api import db\nfrom rhub.api.utils import row2dict\nfrom rhub.auth.utils import route_require_admin, user_is_admin\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _tower_job(db_row, tower_data):\n \"\"\"\n Utility to format DB row and Tower data into valid TowerJob as defined in\n OpenAPI schema.\n \"\"\"\n return {\n **row2dict(db_row),\n 'status': tower_data['status'],\n 'created_at': tower_data['created'],\n 'started': tower_data['started'] is not None,\n 'started_at': tower_data['started'],\n 'finished': tower_data['finished'] is not None,\n 'finished_at': tower_data['finished'],\n 'failed': tower_data['failed'],\n }\n\n\ndef list_servers():\n servers = model.Server.query.all()\n return [row2dict(server) for server in servers], 200\n\n\n@route_require_admin\ndef create_server(body, user):\n body.setdefault('description', '')\n\n server = model.Server(**body)\n db.session.add(server)\n db.session.commit()\n return row2dict(server)\n\n\ndef get_server(server_id):\n server = model.Server.query.get(server_id)\n if not server:\n return problem(404, 'Not Found', f'Server {server_id} does not exist')\n return row2dict(server)\n\n\n@route_require_admin\ndef update_server(server_id, body, user):\n server = model.Server.query.get(server_id)\n if not server:\n return problem(404, 'Not Found', f'Server {server_id} does not exist')\n\n for k, v in body.items():\n setattr(server, k, v)\n db.session.commit()\n\n return row2dict(server)\n\n\n@route_require_admin\ndef delete_server(server_id, user):\n server = model.Server.query.get(server_id)\n if not server:\n return problem(404, 'Not Found', f'Server {server_id} does not exist')\n\n db.session.delete(server)\n db.session.commit()\n\n\ndef list_templates():\n templates = model.Template.query.all()\n return [row2dict(template) for template in templates]\n\n\n@route_require_admin\ndef create_template(body, user):\n body.setdefault('description', '')\n\n template = model.Template(**body)\n db.session.add(template)\n db.session.commit()\n return row2dict(template)\n\n\ndef get_template(template_id):\n template = model.Template.query.get(template_id)\n if not template:\n return problem(404, 'Not Found', f'Template {template_id} does not exist')\n\n try:\n tower_client = template.server.create_tower_client()\n\n if template.tower_template_is_workflow:\n tower_survey_data = tower_client.workflow_get_survey(\n template.tower_template_id)\n else:\n tower_survey_data = tower_client.template_get_survey(\n template.tower_template_id)\n\n return {\n **row2dict(template),\n 'tower_survey': tower_survey_data,\n }\n\n except TowerError as e:\n logger.exception(f'Failed to get template data from Tower, {e}')\n return 
problem(e.response.status_code, 'Error',\n f'Failed to get template {template_id} data from Tower')\n\n except Exception as e:\n logger.exception(e)\n return problem(500, 'Server Error', f'Unknown server error, {e}')\n\n\n@route_require_admin\ndef update_template(template_id, body, user):\n template = model.Template.query.get(template_id)\n if not template:\n return problem(404, 'Not Found', f'Template {template_id} does not exist')\n\n for k, v in body.items():\n setattr(template, k, v)\n db.session.commit()\n\n return row2dict(template)\n\n\n@route_require_admin\ndef delete_template(template_id, user):\n template = model.Template.query.get(template_id)\n if not template:\n return problem(404, 'Not Found', f'Template {template_id} does not exist')\n\n db.session.delete(template)\n db.session.commit()\n\n\ndef launch_template(template_id, body, user):\n template = model.Template.query.get(template_id)\n if not template:\n return problem(404, 'Not Found', f'Template {template_id} does not exist')\n\n try:\n tower_client = template.server.create_tower_client()\n extra_vars = body.get('extra_vars', {})\n\n logger.info(\n f'Launching tower template {template.id}, extra_vars={extra_vars!r}'\n )\n if template.tower_template_is_workflow:\n tower_job_data = tower_client.workflow_launch(\n template.tower_template_id, extra_vars)\n else:\n tower_job_data = tower_client.template_launch(\n template.tower_template_id, extra_vars)\n\n job = model.Job(\n template_id=template.id,\n tower_job_id=tower_job_data['id'],\n launched_by=user,\n )\n db.session.add(job)\n db.session.commit()\n\n return _tower_job(job, tower_job_data)\n\n except TowerError as e:\n logger.exception(f'Failed to launch tower template, {e}')\n\n problem_ext = None\n try:\n problem_ext = e.response.json()\n if 'detail' in problem_ext:\n del problem_ext['detail']\n except Exception:\n pass # simply ignore\n\n return problem(e.response.status_code, 'Error',\n f'Failed to launch template {template_id}',\n ext=problem_ext)\n\n except Exception as e:\n logger.exception(e)\n return problem(500, 'Server Error', f'Unknown server error, {e}')\n\n\ndef list_template_jobs(template_id, user):\n jobs = model.Job.query.filter_by(template_id=template_id)\n if not user_is_admin(user):\n jobs = jobs.filter_by(launched_by=user)\n return [row2dict(job) for job in jobs]\n\n\ndef list_jobs(user):\n if user_is_admin(user):\n jobs = model.Job.query.all()\n else:\n jobs = model.Job.query.filter_by(launched_by=user)\n return [row2dict(job) for job in jobs]\n\n\ndef get_job(job_id, user):\n job = model.Job.query.get(job_id)\n if not job:\n return problem(404, 'Not Found', f'Job {job_id} does not exist')\n\n if not user_is_admin(user) and job.launched_by != user:\n return problem(403, 'Forbidden',\n f\"You don't have permissions to view job {job_id}\")\n\n try:\n tower_client = job.server.create_tower_client()\n\n if job.template.tower_template_is_workflow:\n tower_job_data = tower_client.workflow_job_get(job.tower_job_id)\n else:\n tower_job_data = tower_client.template_job_get(job.tower_job_id)\n\n return _tower_job(job, tower_job_data)\n\n except TowerError as e:\n logger.exception(f'Failed to get job data from Tower, {e}')\n return problem(e.response.status_code, 'Error',\n f'Failed to get job {job_id} data from Tower')\n\n except Exception as e:\n logger.exception(e)\n return problem(500, 'Server Error', f'Unknown server error, {e}')\n\n\ndef relaunch_job(job_id, user):\n job = model.Job.query.get(job_id)\n if not job:\n return problem(404, 'Not Found', 
f'Job {job_id} does not exist')\n\n if not user_is_admin(user) and job.launched_by != user:\n return problem(403, 'Forbidden',\n f\"You don't have permissions to relaunch job {job_id}\")\n\n try:\n tower_client = job.server.create_tower_client()\n\n if job.template.tower_template_is_workflow:\n tower_job_data = tower_client.workflow_job_relaunch(job.tower_job_id)\n else:\n tower_job_data = tower_client.template_job_relaunch(job.tower_job_id)\n\n relaunched_job = model.Job(\n template_id=job.template_id,\n tower_job_id=tower_job_data['id'],\n launched_by=user,\n )\n db.session.add(relaunched_job)\n db.session.commit()\n\n return _tower_job(relaunched_job, tower_job_data)\n\n except TowerError as e:\n logger.exception(f'Failed to relaunch job, {e}')\n\n problem_ext = None\n try:\n problem_ext = e.response.json()\n if 'detail' in problem_ext:\n del problem_ext['detail']\n except Exception:\n pass # simply ignore\n\n return problem(e.response.status_code, 'Error',\n f'Failed to relaunch job {job_id}',\n ext=problem_ext)\n\n except Exception as e:\n logger.exception(e)\n return problem(500, 'Server Error', f'Unknown server error, {e}')\n\n\ndef get_job_stdout(job_id, user):\n job = model.Job.query.get(job_id)\n if not job:\n return problem(404, 'Not Found', f'Job {job_id} does not exist')\n\n if not user_is_admin(user) and job.launched_by != user:\n return problem(403, 'Forbidden',\n f\"You don't have permissions to view job {job_id} stdout\")\n\n try:\n tower_client = job.server.create_tower_client()\n tower_job_stdout = tower_client.template_job_stdout(job.tower_job_id)\n # Force text/plain response.\n return Response(tower_job_stdout, 200, content_type='text/plain')\n\n except TowerError as e:\n logger.exception(f'Failed to get job {job_id} stdout, {e}')\n return problem(e.response.status_code, 'Error',\n f'Failed to get job {job_id}')\n\n except Exception as e:\n logger.exception(e)\n return problem(500, 'Server Error', f'Unknown server error, {e}')\n\n\ndef webhook_auth(username, password, required_scopes=None):\n # Tower offers sending 'basic' auth credentials to protect access\n # to the webhook_notification() endpoint. 
The credentials are\n # defined/stored in Vault.\n\n try:\n if (username == current_app.config['WEBHOOK_USER']\n and password == current_app.config['WEBHOOK_PASS']):\n return {'sub': 'webhook'} # successful authentication\n\n except KeyError as e:\n logger.exception('Missing tower webhook notification credential(s)'\n f' {e}; notification ignored')\n\n logger.warning('Incorrect tower webhook notification credentials supplied;'\n ' notification ignored')\n\n raise Unauthorized('Incorrect tower webhook notification credentials'\n ' supplied')\n\n\ndef webhook_notification():\n # See Tower notification documentation for additional information:\n # https://docs.ansible.com/ansible-tower/latest/html/userguide/\n # notifications.html#webhook\n\n # 1) This function should receive the notification payload from Tower [done]\n # 2) Process the payload data: [tbd]\n # Possibly match on the jobId returned in the payload with a jobId\n # linked to a cluster stored in the DB?\n # - get_cluster_info(id) [if more information on the cluster is needed]\n # 3) Notify the user of an event: [tbd]\n # - app.io.emit(popup_message,data)\n # - update_cluster_status(id) [database operation]\n # - send_email(user.email, message) or submit the notification to Hydra?\n\n # inspect json payload to ensure certain fields are present\n try:\n jobId = request.json['id']\n jobStatus = request.json['status']\n except Exception as e:\n logger.exception(f'Unexpected tower webhook notification json; missing {e}')\n return problem(400, 'Unexpected tower webhook notification json',\n 'JSON payload missing required field(s)',\n ext={'missing': str(e)})\n\n logger.info(f'Received a notification from tower {jobId, jobStatus}')\n\n # Notify the user that something has occurred...\n # Do something here...TBD...\n pass\n\n return Response(status=204)\n","sub_path":"src/rhub/api/tower.py","file_name":"tower.py","file_ext":"py","file_size_in_byte":11777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"424294486","text":"import unittest\nimport numpy as np\nimport pycqed as pq\nimport os\n# # hack for badly installed matplotlib on maserati pc\n# import matplotlib\n# matplotlib.use('QT4Agg')\nfrom pycqed.analysis import measurement_analysis as ma\n\nfrom pycqed.analysis.tools.data_manipulation import rotate_complex\n\n\nclass Test_SSRO_discrimination_analysis(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')\n ma.a_tools.datadir = self.datadir\n self.a_discr = ma.SSRO_discrimination_analysis(label='dummy_Butterfly',\n plot_2D_histograms=False)\n\n self.a_discr_rot = ma.SSRO_discrimination_analysis(\n label='dummy_Butterfly', theta_in=-self.a_discr.theta,\n plot_2D_histograms=False)\n\n def test_discrimination_fidelity(self):\n # Test the correct file is loaded\n self.assertEqual(\n self.a_discr.folder,\n os.path.join(self.datadir, '20161214', '120000_dummy_Butterfly'))\n mu_a = self.a_discr.mu_a\n mu_b = self.a_discr.mu_b\n\n # Test if the fit gives the expected means\n self.assertAlmostEqual(mu_a.real, -6719.6, places=1)\n self.assertAlmostEqual(mu_a.imag, 20024.2, places=1)\n self.assertAlmostEqual(mu_b.real, 1949.4, places=1)\n self.assertAlmostEqual(mu_b.imag, 37633.0, places=1)\n\n # Test identifying the rotation vector\n self.assertAlmostEqual(self.a_discr.theta % 180, 63.8, places=1)\n self.assertAlmostEqual(self.a_discr.theta % 180,\n np.angle(self.a_discr.mu_b-self.a_discr.mu_a,\n deg=True), 
+{"seq_id":"424294486","text":"import unittest\nimport numpy as np\nimport pycqed as pq\nimport os\n# # hack for badly installed matplotlib on maserati pc\n# import matplotlib\n# matplotlib.use('QT4Agg')\nfrom pycqed.analysis import measurement_analysis as ma\n\nfrom pycqed.analysis.tools.data_manipulation import rotate_complex\n\n\nclass Test_SSRO_discrimination_analysis(unittest.TestCase):\n\n    @classmethod\n    def setUpClass(self):\n        self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')\n        ma.a_tools.datadir = self.datadir\n        self.a_discr = ma.SSRO_discrimination_analysis(label='dummy_Butterfly',\n                                                       plot_2D_histograms=False)\n\n        self.a_discr_rot = ma.SSRO_discrimination_analysis(\n            label='dummy_Butterfly', theta_in=-self.a_discr.theta,\n            plot_2D_histograms=False)\n\n    def test_discrimination_fidelity(self):\n        # Test the correct file is loaded\n        self.assertEqual(\n            self.a_discr.folder,\n            os.path.join(self.datadir, '20161214', '120000_dummy_Butterfly'))\n        mu_a = self.a_discr.mu_a\n        mu_b = self.a_discr.mu_b\n\n        # Test if the fit gives the expected means\n        self.assertAlmostEqual(mu_a.real, -6719.6, places=1)\n        self.assertAlmostEqual(mu_a.imag, 20024.2, places=1)\n        self.assertAlmostEqual(mu_b.real, 1949.4, places=1)\n        self.assertAlmostEqual(mu_b.imag, 37633.0, places=1)\n\n        # Test identifying the rotation vector\n        self.assertAlmostEqual(self.a_discr.theta % 180, 63.8, places=1)\n        self.assertAlmostEqual(self.a_discr.theta % 180,\n                               np.angle(self.a_discr.mu_b-self.a_discr.mu_a,\n                                        deg=True), places=1)\n        diff_v_r = rotate_complex((mu_b-mu_a), -self.a_discr.theta)\n        self.assertAlmostEqual(diff_v_r.imag, 0)\n\n        self.assertAlmostEqual(self.a_discr.opt_I_threshold,\n                               np.mean([mu_a.real, mu_b.real]), places=1)\n        self.assertAlmostEqual(self.a_discr.F_discr, 0.954, places=3)\n        self.assertAlmostEqual(self.a_discr.F_discr_I, 0.5427, places=3)\n\n    def test_rotated_discrimination_fidelity(self):\n        self.assertEqual(\n            self.a_discr_rot.folder,\n            os.path.join(self.datadir, '20161214', '120000_dummy_Butterfly'))\n\n        # self.assertAlmostEqual(self.a_discr_rot.theta, 0)\n        mu_a = self.a_discr_rot.mu_a\n        mu_b = self.a_discr_rot.mu_b\n        self.assertAlmostEqual((mu_b-mu_a).imag/10, 0, places=0)\n\n        # This test should also pass ...\n        # self.assertAlmostEqual(self.a_discr_rot.F_discr, self.a_discr_rot.F_discr_I,\n        #                        places=3)\n        self.assertAlmostEqual(self.a_discr_rot.F_discr, self.a_discr.F_discr,\n                               places=3)\n","sub_path":"pycqed/tests/test_SSRO_analysis.py","file_name":"test_SSRO_analysis.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"372975708","text":"#!/usr/bin/env python\nimport os\nimport urllib\n\nfrom setuptools import setup, find_packages, Command\n\nimport admin_steroids\n\n\ndef get_reqs(reqs=[\"Django>=1.4.0\"]):\n    # optparse is included with Python <= 2.7, but has been deprecated in favor\n    # of argparse.  We try to import argparse and if we can't, then we'll add\n    # it to the requirements\n    try:\n        import argparse\n    except ImportError:\n        reqs.append(\"argparse>=1.1\")\n    return reqs\n\nsetup(\n    name=\"django-admin-steroids\",\n    version=admin_steroids.__version__,\n    packages=find_packages(),\n    package_data={\n        'admin_steroids': [\n            'templates/*.*',\n            'templates/*/*.*',\n            'templates/*/*/*.*',\n            'static/*.*',\n            'static/*/*.*',\n            'static/*/*/*.*',\n        ],\n    },\n    author=\"Chris Spencer\",\n    author_email=\"chrisspen@gmail.com\",\n    description=\"Tweaks and tools to simplify Django admin.\",\n    license=\"LGPL\",\n    url=\"https://github.com/chrisspen/django-admin-steroids\",\n    #https://pypi.python.org/pypi?%3Aaction=list_classifiers\n    classifiers=[\n        'Environment :: Web Environment',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',\n        'Operating System :: OS Independent',\n        'Programming Language :: Python',\n        'Framework :: Django',\n    ],\n    zip_safe=False,\n    install_requires=get_reqs(),\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"258280258","text":"import psycopg2\nfrom psycopg2 import sql\n\nconnection = None\ncursor = None\n\n\ndef open_db_connection():\n    global connection, cursor\n    try:\n        connection = psycopg2.connect(user=\"postgres\",\n                                      password=\"Schouten2002\",\n                                      host=\"127.0.0.1\",\n                                      port=\"5432\",\n                                      database=\"postgres\")\n        cursor = connection.cursor()\n    except (Exception, psycopg2.Error) as error:\n        print(\"Error while connecting to PostgreSQL\", error)\n\n\ndef tweet_message(msg, name):\n    from datetime import datetime\n    currentdateTime = datetime.today().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n    query = sql.SQL('insert into ns_berichten.tweet values (default, %s, %s, %s, null, null)')\n    data = (msg, name, currentdateTime)\n    execute_query(query, data)\n    # cursor.execute('insert into ns_berichten.tweet values (default, %s, %s, %s, null, null)',\n    #                (msg, name, currentdateTime))\n    print('Message saved to db')\n\n\n\ndef add_moderator(first_name, infix, last_name):\n    cursor.execute('insert into ns_berichten.moderator values (default, %s, %s, %s)',\n                   (first_name, infix, last_name))\n\n\n\ndef add_scherm(name, location):\n    cursor.execute('insert into ns_berichten.scherm values (default, %s, %s)',\n                   (location, name))\n\ndef close_db_connection():\n    # closing database connection.\n    if connection:\n        cursor.close()\n        connection.close()\n        print(\"PostgreSQL connection is closed\")\n\n\ndef execute_query(query, data):\n    open_db_connection()\n\n    cursor.execute(query, data)\n    connection.commit()\n\n    close_db_connection()\n","sub_path":"Semester_1/Sprints/Sprint_4/postgresql_db.py","file_name":"postgresql_db.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"638070529","text":"# coding: utf-8\n# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department\n# Distributed under the terms of \"New BSD License\", see the LICENSE file.\n\nimport unittest\n\nimport numpy as np\n\nfrom pyiron_atomistics.atomistics.structure.atoms import CrystalStructure\nfrom pyiron_base._tests import TestWithProject\n\n\ndef convergence_goal(self, **qwargs):\n    import numpy as np\n    eps = 0.2\n    if \"eps\" in qwargs:\n        eps = qwargs[\"eps\"]\n    erg_lst = self.get_from_childs(\"output/generic/energy\")\n    var = 1000 * np.var(erg_lst)\n    # print(var / len(erg_lst))\n    if var / len(erg_lst) < eps:\n        return True\n    job_prev = self[-1]\n    job_name = self.first_child_name() + \"_\" + str(len(self))\n    job_next = job_prev.restart(job_name=job_name)\n    return job_next\n\n\nclass TestMurnaghan(TestWithProject):\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        cls.basis = CrystalStructure(\n            element=\"Fe\", bravais_basis=\"fcc\", lattice_constant=3.5\n        )\n\n    def test_interactive_run(self):\n        job = self.project.create_job('HessianJob', 'hessian')\n        job.set_reference_structure(self.basis)\n        job.set_elastic_moduli(1, 1)\n        job.set_force_constants(1)\n        job.server.run_mode.interactive = True\n        murn = job.create_job('Murnaghan', 'murn_hessian')\n        murn.input['num_points'] = 5\n        murn.input['vol_range'] = 1e-5\n        murn.run()\n        self.assertAlmostEqual(self.basis.get_volume(), murn['output/equilibrium_volume'])\n\n        optimal = murn.get_structure()\n        self.assertAlmostEqual(optimal.get_volume(), murn['output/equilibrium_volume'],\n                               msg=\"Output of get_structure should have equilibrium volume\")\n\n    def test_run(self):\n        job = self.project.create_job(\n            'AtomisticExampleJob', \"job_test\"\n        )\n        job.structure = self.basis\n        job_ser = self.project.create_job(\n            self.project.job_type.SerialMaster, \"murn_iter\"\n        )\n        job_ser.append(job)\n        job_ser.set_goal(convergence_goal, eps=0.4)\n        murn = self.project.create_job(\"Murnaghan\", \"murnaghan\")\n        murn.ref_job = job_ser\n        murn.input['num_points'] = 3\n        murn.run()\n        self.assertTrue(murn.status.finished)\n\n        murn.remove()\n        job_ser.remove()\n\n    def test_fitting_routines(self):\n        ref_job = self.project.create.job.Lammps('ref')\n        murn = ref_job.create_job('Murnaghan', 'murn')\n        murn.structure = self.basis\n        # mock murnaghan run with data from:\n        # ref_job = pr.create.job.Lammps('Lammps')\n        # ref_job.structure = pr.create_structure('Al','fcc', 4.0).repeat(3)\n        # ref_job.potential = '1995--Angelo-J-E--Ni-Al-H--LAMMPS--ipr1'\n        # murn = ref_job.create_job(ham.job_type.Murnaghan, 'murn')\n        # murn.run()\n        energies = np.array([-88.23691773, -88.96842984, -89.55374317, -90.00642629,\n                             -90.33875009, -90.5618246, -90.68571886, -90.71957679,\n                             -90.67170222, -90.54964935, -90.36029582])\n        volume = np.array([388.79999999, 397.44, 406.08, 414.71999999,\n                           423.35999999, 431.99999999, 440.63999999, 449.27999999,\n                           457.92, 466.55999999, 475.19999999])\n        murn._hdf5[\"output/volume\"] = volume\n        murn._hdf5[\"output/energy\"] = energies\n        murn._hdf5[\"output/equilibrium_volume\"] = 448.4033384110422\n        murn.status.finished = True\n\n        murn.plot(plt_show=False)\n        with self.subTest(msg=\"standard polynomial fit\"):\n            self.assertAlmostEqual(-90.71969974284912, murn.equilibrium_energy)\n            self.assertAlmostEqual(448.1341230545222, murn.equilibrium_volume)\n\n        with self.subTest(msg=\"polynomial fit with fit_order = 2\"):\n            murn.fit_polynomial(fit_order=2)\n            self.assertAlmostEqual(-90.76380033222287, murn.equilibrium_energy)\n            self.assertAlmostEqual(449.1529040727273, murn.equilibrium_volume)\n\n        with self.subTest(msg='birchmurnaghan'):\n            murn.fit_birch_murnaghan()\n            self.assertAlmostEqual(-90.72005405262217, murn.equilibrium_energy)\n            self.assertAlmostEqual(448.41909755611437, murn.equilibrium_volume)\n\n        with self.subTest(msg=\"vinet\"):\n            murn.fit_vinet()\n            self.assertAlmostEqual(-90.72000006839492, murn.equilibrium_energy)\n            self.assertAlmostEqual(448.40333840970357, murn.equilibrium_volume)\n\n        with self.subTest(msg='murnaghan'):\n            murn.fit_murnaghan()\n            self.assertAlmostEqual(-90.72018572197015, murn.equilibrium_energy)\n            self.assertAlmostEqual(448.4556825322108, murn.equilibrium_volume)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"tests/atomistics/master/test_murnaghan.py","file_name":"test_murnaghan.py","file_ext":"py","file_size_in_byte":4851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"352250572","text":"def myLog(x, b):\n    '''\n    x: a positive integer\n    b: a positive integer; b >= 2\n\n    returns: log_b(x), or, the logarithm of x relative to a base b.\n    '''\n    # Your Code Here\n    \n    def calpower(b,n):\n        if n == 1:\n            return (b)\n        else:\n            return b * calpower(b,n-1)\n    \n    for i in range(1,x):\n        if calpower(b,i)-x == 0:\n            return (i)\n            break\n        elif calpower(b,i)-x > 0:\n            return (i-1)\n            break\n\nprint(myLog(16, 4))","sub_path":"Learning/MOOC/Python_Edx/Quiz/quiz-program-1.py","file_name":"quiz-program-1.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"488794924","text":"import datetime\nfrom flask_wtf import Form\nfrom wtforms.fields import StringField, TextAreaField, \\\n    DateTimeField, IntegerField, SelectField\nfrom wtforms.validators import DataRequired, \\\n    URL, InputRequired, Optional, Regexp\n\n\nclass ApplicationForm(Form):\n    name = StringField(\n        label=\"Name\",\n        description=\"Name of your application\",\n        validators=[DataRequired()])\n    feedbase = StringField(\n        label=\"Feed base\",\n        description=\"Used for appcast URL, should contains no space \"\n                    \"and no special characters\",\n        validators=[DataRequired(), Regexp('[a-zA-Z0-9]+')])\n    os = SelectField(\n        label=\"OS\",\n        description=\"Target OS\",\n        choices=[('osx', 'Mac OS X'), ('windows', 'Windows')],\n        validators=[DataRequired()])\n\n\nclass ReleaseForm(Form):\n    description = TextAreaField(\n        label=\"Description\",\n        description=\"Release description\")\n    descriptionURL = StringField(\n        label=\"Description URL\",\n        description=\"URL containing page for description\",\n        validators=[Optional(), URL()])\n    pubDate = DateTimeField(\n        label=\"Publication date\",\n        default=datetime.datetime.utcnow,\n        validators=[DataRequired()])\n    url = StringField(\n        label=\"url\",\n        description=\"URL to fetch this release binary\",\n        validators=[DataRequired(), URL()])\n    length = IntegerField(\n        label=\"Length\",\n        description=\"Size of this release\",\n        validators=[DataRequired()])\n    dsaSignature = StringField(\n        label=\"Signature\",\n        description=\"DSA Signature\",\n        validators=[DataRequired()])\n    v_major = IntegerField(\n        default=0,\n        validators=[InputRequired()])\n    v_minor = IntegerField(\n        default=0,\n        validators=[InputRequired()])\n    v_revis = IntegerField(\n        default=0,\n        validators=[InputRequired()])\n","sub_path":"constellark/apps/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"100587912","text":"# @dependency 001-main/002-createrepository.py\n# @dependency 001-main/003-self/200-json/006-changesets.py\n\nFROM_SHA1 = \"573c5ff15ad95cfbc3e2f2efb0a638a4a78c17a7\"\nFROM_SINGLE_SHA1 = \"aabc2b10c930a9e72fe9587a6e8634087bb3efe1\"\nTO_SHA1 = \"6dc8e9c2d952028286d4b83475947bd0b1410860\"\nROOT_SHA1 = \"ee37c47f6f6a14afa6912c1cc58a9f49d2a29acd\"\n\nGENERIC_FILECHANGE = { \"id\": int,\n                       \"changeset\": int,\n                       \"old_sha1\": str,\n                       \"new_sha1\": str,\n                       \"chunks\": list,\n                       \"path\": str,\n                       \"new_mode\": None,\n                       \"old_mode\": None }\n\n# Filechanges for changeset from single commit\nsingle_changeset = frontend.json(\n    \"changesets\",\n    params={ \"repository\": 1,\n             \"commit\": TO_SHA1})\nfrontend.json(\n    \"filechanges\",\n    params={ \"repository\": 1,\n             \"changeset\": single_changeset[\"id\"]},\n    expect={\"filechanges\": [\n        {\"changeset\": int,\n         \"old_sha1\": \"a2ffb3a6cd3b021c34592f4bd8f32905e4dd5830\",\n         \"new_sha1\": \"2d06e47848827d8d8312542f3687f0380ebbc3ed\",\n         \"chunks\": [\n             {\"insertoffset\": 85,\n              \"deleteoffset\": 85,\n              \"deletecount\": 0,\n              \"is_whitespace\": 0,\n              \"analysis\": None,\n              \"insertcount\": 4}\n         ],\n         \"path\": \"testing/__init__.py\",\n         \"new_mode\": None,\n         \"id\": int,\n         \"old_mode\": None},\n        {\"changeset\": int,\n         \"old_sha1\": \"e285e7c535dd8eee185d71c5adec1a328e586a58\",\n         \"new_sha1\": \"ac6fe72b7ffefb9d5d4c6637aa94c02e756b2665\",\n         \"chunks\": [\n             {\"insertoffset\": 92,\n              \"deleteoffset\": 92,\n              \"deletecount\": 34,\n              \"is_whitespace\": 0,\n              \"analysis\": \"0=5:ws,i0-4;1=6:ws,i0-4;2=7:ws,i0-4;3=8:ws,i0-4;4=9:ws,i0-4;5=10:ws,i0-4;6=11:ws,i0-4;7=12:ws,i0-4;8=13:ws,i0-4;9=14:ws,i0-4;10=15:ws,i0-4;11=16:ws,i0-4;13=17:ws,i0-4;14=18;15=19:ws,i0-4;16=20:ws,i0-4;17=21:ws,i0-4;18=22:ws,i0-4;19=23:ws,i0-4;20=24:ws,i0-4;21=25:ws,i0-4;22=26:ws,i0-4;23=27:ws,i0-4;24=28:ws,i0-4;25=29:ws,i0-4;26=30:ws,i0-4;27=31:ws,i0-4;28=32:ws,i0-4;29=33:ws,i0-4;30=34:ws,i0-4\",\n              \"insertcount\": 35}],\n         \"path\": \"testing/repository.py\",\n         \"new_mode\": None,\n         \"id\": int,\n         \"old_mode\": None},\n        {\"changeset\": int,\n         \"old_sha1\": \"0f5b7b313b6152f9c4f342c151fa1038a83e03f4\",\n         \"new_sha1\": \"c2e9ee01afb2b0cdde940532f93a6823013c8a91\",\n         \"chunks\": [\n             {\"insertoffset\": 52,\n              \"deleteoffset\": 52,\n              \"deletecount\": 0,\n              \"is_whitespace\": 0,\n              \"analysis\": None,\n              \"insertcount\": 1}],\n         \"path\": \"testing/virtualbox.py\",\n         \"new_mode\": None,\n         \"id\": int,\n         \"old_mode\": None}]})\n\n# Single filechange for changeset from two commits\ncustom_changeset = frontend.json(\n    \"changesets\",\n    params={ \"repository\": \"critic\",\n             \"from\": FROM_SHA1,\n             \"to\": TO_SHA1})\nfrontend.json(\n    \"filechanges/\" + str(custom_changeset[\"filediffs\"][0]),\n    params={ \"repository\": 1,\n             \"changeset\": custom_changeset[\"id\"]},\n    expect=GENERIC_FILECHANGE)\n\n# Invalid filechange id\nfrontend.json(\n    \"filechanges/-1\",\n    params={ \"repository\": 1,\n             \"changeset\": custom_changeset[\"id\"]},\n    expect={ \"error\": {\n        \"message\": \"Invalid numeric id: '-1'\",\n        \"title\": \"Invalid API request\"}\n    },\n    expected_http_status=400)\n","sub_path":"testing/tests/001-main/003-self/200-json/007-filechanges.py","file_name":"007-filechanges.py","file_ext":"py","file_size_in_byte":3530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"241897463","text":"#!/usr/bin/env python\n\n\nclass Config:\n    dax_stocks = dict(dax=\"^GDAXI\", adidas=\"ADS.DE\", allianz=\"ALV.DE\", basf=\"BAS.DE\", bayer=\"BAYN.DE\",\n                      beiersdorf=\"BEI.DE\", bmw=\"BMW.DE\", continental=\"CON.DE\", covestro=\"1COV.DE\", daimler=\"DAI.DE\",\n                      deutsche_bank=\"DBK.DE\", deutsche_boerse=\"DB1.DE\", deutsche_lufthansa=\"LHA.DE\",\n                      deutsche_post=\"DPW.DE\", deutsche_telekom=\"DTE.DE\", eon=\"EOAN.DE\", fresenius=\"FRE.DE\",\n                      fresenius_medical_care=\"FME.DE\", heidelberg_cement=\"HEI.DE\", henkel=\"HEN3.DE\", infineon=\"IFX.DE\",\n                      linde=\"LIN.DE\", merck=\"MRK.DE\", mtu_aero_engines=\"MTX.DE\", muenchener_rueckversicherung=\"MUV2.DE\",\n                      rwe=\"RWE.DE\", sap=\"SAP.DE\", siemens=\"SIE.DE\", volkswagen=\"VOW3.DE\", vonovia=\"VNA.DE\",\n                      wirecard=\"WDI.DE\")\n    start = '2018-01-01'\n    end = '2020-03-28'\n","sub_path":"configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"103288205","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: .\\src\\sources\\mtlnovel.py\n# Compiled at: 2020-04-22 08:51:26\n# Size of source mod 2**32: 3521 bytes\nimport json, logging, re\nfrom utils.crawler import Crawler\nlogger = logging.getLogger('MTLNOVEL')\nsearch_url = 'https://www.mtlnovel.com/wp-admin/admin-ajax.php?action=autosuggest&q=%s'\n\nclass MtlnovelCrawler(Crawler):\n    base_url = 'https://www.mtlnovel.com/'\n\n    def search_novel(self, query):\n        query = query.lower().replace(' ', '%20')\n        list_url = search_url % query\n        data = self.get_json(list_url)['items'][0]['results']\n        results = []\n        for item in data:\n            url = self.absolute_url('https://www.mtlnovel.com/?p=%s' % item['id'])\n            results.append({'url':url, \n             'title':item['title'], \n             'info':self.search_novel_info(url)})\n        else:\n            return results\n\n    def search_novel_info(self, url):\n        \"\"\"Get novel title, autor, cover etc\"\"\"\n        logger.debug('Visiting %s', url)\n        soup = self.get_soup(url)\n        chapters = soup.select('div.info-wrap div')[1].text.replace('Chapters', '')\n        info = '%s chapters' % chapters\n        return info\n\n    def read_novel_info(self):\n        \"\"\"Get novel title, autor, cover etc\"\"\"\n        logger.debug('Visiting %s', self.novel_url)\n        soup = self.get_soup(self.novel_url)\n        self.novel_title = soup.select_one('h1.entry-title').text.strip()\n        logger.info('Novel title: %s', self.novel_title)\n        self.novel_cover = self.absolute_url(soup.select('div.nov-head amp-img')[1]['src'])\n        logger.info('Novel cover: %s', self.novel_cover)\n        self.novel_author = soup.select('table.info tr')[3].find('a').text\n        logger.info('Novel author: %s', self.novel_author)\n        chapter_list = soup.select('div.ch-list amp-list')\n        for item in chapter_list:\n            data = self.get_json(item['src'])\n            for chapter in data['items']:\n                chap_id = len(self.chapters) + 1\n                if len(self.chapters) % 100 == 0:\n                    vol_id = chap_id // 100 + 1\n                    vol_title = 'Volume ' + str(vol_id)\n                    self.volumes.append({'id':vol_id, \n                     'title':vol_title})\n                self.chapters.append({'id':chap_id, \n                 'volume':vol_id, \n                 'url':chapter['permalink'], \n                 'title':chapter['title'] or 'Chapter %d' % chap_id})\n\n    def download_chapter_body(self, chapter):\n        \"\"\"Download body of a single chapter and return as clean html format.\"\"\"\n        logger.info('Downloading %s', chapter['url'])\n        soup = self.get_soup(chapter['url'])\n        logger.debug(soup.title.string)\n        contents = soup.select('div.par p')\n        body = [str(p) for p in contents if p.text.strip()]\n        return '<p>' + '</p><p>'.join(body) + '</p>'","sub_path":"pycfiles/lightnovel_crawler-2.21.0-py3-none-any/mtlnovel.cpython-38.py","file_name":"mtlnovel.cpython-38.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"625304152","text":"from bs4 import BeautifulSoup\r\nfrom string import *\r\nimport csv\r\nfrom urllib.request import Request, urlopen\r\nfor i in range(1,7):\r\n    urlpage = 'URL/page-'+str(i)+'/'\r\n    page = Request(urlpage, headers={'User-Agent': 'Mozilla/5.0'})\r\n    webpage_byte = urlopen(page).read()\r\n    webpage = webpage_byte.decode('utf-8')\r\n    results = BeautifulSoup(webpage, 'html.parser')\r\n    job_elements = results.find_all(\"div\", class_=\"prod_list\")\r\n    for job_element in job_elements:\r\n\t    name = getattr(job_element.find(\"span\", class_=\"titleSpan\"), 'text', None)\r\n\t    address = getattr(job_element.find(\"span\", class_=\"placeText\"), 'text', None)\r\n\t    phone = getattr(job_element.find(\"a\", class_=\"coordonneesItemLink showMobile\"), 'text', None)\r\n\t    print(str(name)+\";\"+str(address)+\";\"+str(phone).strip())\r\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"303946169","text":"#!/usr/bin/python\n#\n# Copyright 2019 The Cloud Robotics Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tunnel ROS over Pub/Sub.\n\nEach instance of the ros-adapter talks to the cloud-tunnel instance associated\nwith the robot. This forms a per-robot PubSub system that bridges ros messages\nbetween the connected parties.\n\nThe messages that should be mirrored can be configured per ros-adapter instance\nin the ros-adapter.yaml.\n\nThe PubSub topics are created as robots.{id}.ros.{rostopic}.\n\"\"\"\n\nfrom __future__ import division\n\nimport argparse\nimport cStringIO\nimport json\nimport os\nimport Queue\nimport threading\nimport time\nimport traceback\nimport urllib2\nimport yaml\n\nfrom google.cloud import pubsub\nfrom google.cloud import _helpers\n\nimport grpc\nimport prometheus_client\nimport rospy\n\nimport announce\nimport param_sync\nimport shapeshifter\n\n\nCONFIG_PATH = 'ros-adapter.yaml'\nSCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\nQPS_COUNTER = prometheus_client.Counter(\n    'ros_messages_total', 'ROS messages relayed', [\n        'direction', 'network', 'topic'])\n\n\n# TODO(rodrigoq): remove this monkey-patch once either there's a better way to\n# do it (see upstream issue:\n# https://github.com/GoogleCloudPlatform/google-cloud-python/issues/3594) or\n# when we move to another API/transport.\nfrom google.cloud.pubsub import _gax\n\n\ndef make_secure_channel(credentials, user_agent, host, extra_options=()):\n    if host == \"pubsub.googleapis.com\":\n        host = os.environ['CLOUD_ROBOTICS_DOMAIN']\n    MAX_MESSAGE_BYTES = 100 * 1024 * 1024\n    options = (\n        ('grpc.max_message_length', MAX_MESSAGE_BYTES),\n        ('grpc.max_send_message_length', MAX_MESSAGE_BYTES),\n        ('grpc.max_receive_message_length', MAX_MESSAGE_BYTES),\n    ) + extra_options\n    # pass the enlarged message-size options through to the channel\n    return _helpers.make_secure_channel(\n        credentials, user_agent, host, options)\n\n\n_gax.make_secure_channel = make_secure_channel\n\n\n# TODO(rodrigoq): allow live config changes - config in Firestore?\nclass Config(object):\n    def __init__(self, path):\n        with open(path, 'r') as f:\n            config_dict = yaml.safe_load(f)\n\n        self.topics = config_dict['topics']\n        self.params = config_dict['params']\n\n\ndef pubsub_topic_name(robot_name, ros_topic):\n    # TODO(rodrigoq): check length. PubSub resource name is 255 chars max.\n    # TODO(rodrigoq): use `network` in the topic name so we don't subscribe to\n    # our own messages. This might also help the latency issue.\n    return 'robots.%s.ros.%s' % (robot_name, ros_topic.replace('/', '.'))\n\n\ndef pubsub_subscription_name(topic_name, network):\n    # TODO(rodrigoq): check length. PubSub resource name is 255 chars max.\n    return '%s.%s' % (topic_name, network)\n\n\nclass _MaxHzFilter(object):\n    def __init__(self, max_hz):\n        if max_hz:\n            self.period = 1 / max_hz\n        else:\n            self.period = 0\n        self._next_msg = 0\n\n    def pulse(self):\n        if not self.period:\n            return True\n\n        now = time.time()\n        if self._next_msg < now:\n            self._next_msg = now + self.period\n            return True\n\n\nclass TopicAdapter(object):\n    \"\"\"Adapts messages on one topic:\n    - subscribes to the ROS topic and publishes messages to PubSub\n    - subscribes to the PubSub topic and publishes messages to ROS\n      (filtering out messages we published to avoid loops)\n    \"\"\"\n\n    # Max number of messages in a Publish() RPC. 10 allows ~300 messages/s when\n    # RTT = 30ms.\n    PUBLISH_MAX_MESSAGES = 10\n\n    # Max number of messages returned by a Pull() RPC. 2x PUBLISH_MAX_MESSAGES\n    # because Acknowledge() means pulling requires 2x as many calls.\n    PULL_MAX_MESSAGES = 20\n    ROS_QUEUE_SIZE = 20\n\n    # When dropping messages, log a warning at most this often.\n    WARN_THROTTLE_SECONDS = 1\n\n    def __init__(\n            self,\n            ros_adapter,\n            robot_name,\n            pubsub_client,\n            network,\n            ros_topic,\n            params):\n        self.ros_adapter = ros_adapter\n        self.ros_topic = ros_topic\n        self.network = network\n\n        self.latch = params.get('latch', False)\n        self.max_hz_filter = _MaxHzFilter(params.get('max_hz', 0))\n\n        self.latched_msg = None\n\n        self.topic = pubsub_client.topic(pubsub_topic_name(robot_name, ros_topic))\n        if not self.topic.exists():\n            self.topic.create()\n\n        self.subscription = self.topic.subscription(\n            pubsub_subscription_name(self.topic.name, network))\n        if not self.subscription.exists():\n            self.subscription.create()\n\n        self.publish_queue = Queue.Queue()\n\n        # TODO(rodrigoq): add a way to unsubscribe on closing the app\n        rospy.loginfo('creating ROS subscriber for %s...', ros_topic)\n        self.ros_subscriber = shapeshifter.Subscriber(ros_topic, self.callback)\n        # TODO(rodrigoq): we always use latched publishers because we register\n        # lazily on the first message, meaning that subscribers otherwise miss the\n        # first message. This could have unintended effects, so it would be better\n        # to have some sort of delay/timeout on the first message.\n        self.ros_publisher = shapeshifter.Publisher(ros_topic, self.ROS_QUEUE_SIZE,\n                                                    latch=True)\n\n        self.pull_thread = threading.Thread(target=self.pull_pubsub_messages)\n        # TODO(rodrigoq): add a way to cleanly stop the thread\n        self.pull_thread.daemon = True\n        self.pull_thread.start()\n\n        self.publish_thread = threading.Thread(target=self.publish_pubsub_messages)\n        # TODO(rodrigoq): add a way to cleanly stop the thread\n        self.publish_thread.daemon = True\n        self.publish_thread.start()\n\n    # TODO(rodrigoq): add retries. should probably copy the notifier as I'm\n    # rewriting it here...\n    def pull_pubsub_messages(self):\n        while True:\n            try:\n                rospy.logdebug('starting pull on %s...', self.ros_topic)\n                pulled = self.subscription.pull(max_messages=self.PULL_MAX_MESSAGES)\n                if pulled:\n                    rospy.logdebug(\n                        'pulled %d messages on %s',\n                        len(pulled),\n                        self.ros_topic)\n\n                for ack_id, message in pulled:\n                    self.publish_to_ros(message)\n                self.subscription.acknowledge([i for i, _ in pulled])\n            except BaseException:\n                rospy.logerr('Exception in Pub/Sub pull thread:\\n' +\n                             ''.join(traceback.format_exc()))\n\n    def publish_pubsub_messages(self):\n        while True:\n            try:\n                to_publish = [self.publish_queue.get()]\n                try:\n                    while True:\n                        to_publish.append(self.publish_queue.get_nowait())\n                except Queue.Empty:\n                    pass\n\n                with self.topic.batch() as batch:\n                    for msg in to_publish:\n                        batch.publish(msg)\n                rospy.logdebug('published %d messages to Pub/Sub on %s',\n                               len(to_publish), self.ros_topic)\n\n            except BaseException:\n                rospy.logerr('Exception in Pub/Sub publish thread:\\n' +\n                             ''.join(traceback.format_exc()))\n\n    def publish_to_ros(self, pubsub_message):\n        message = json.loads(pubsub_message.data.decode('utf-8'))\n        if message['source'] == self.network:\n            return\n        self.ros_publisher.publish(\n            message['type'],\n            message['md5sum'],\n            message['buff'].encode('latin-1'))\n        QPS_COUNTER.labels(\n            direction=\"out\",\n            network=self.network,\n            topic=self.ros_topic).inc()\n\n    def callback(self, message):\n        if message._connection_header['callerid'] == rospy.get_caller_id():\n            return\n        if not self.max_hz_filter.pulse():\n            return\n\n        # Extract metadata from the rospy.AnyMsg instance. This is required to\n        # advertise the topic correctly on the other end.\n        type = message._connection_header['type']\n        md5sum = message._connection_header['md5sum']\n\n        if hasattr(message, '_buff'):\n            buff = message._buff\n        else:\n            # Some topics (eg clock) already have subscribers created by rospy, which\n            # means that we have to use the data class specified by rospy instead of\n            # our own rospy.AnyMsg.\n            sio = cStringIO.StringIO()\n            message.serialize(sio)\n            buff = sio.getvalue()\n\n        # TODO(rodrigoq): use proto instead of json\n        serialized = json.dumps({\n            'type': type,\n            'md5sum': md5sum,\n            'source': self.network,\n            'buff': buff.decode('latin-1'),\n        }).encode('utf-8')\n\n        self.latched_msg = serialized\n        self.add_to_queue(serialized)\n        QPS_COUNTER.labels(\n            direction=\"in\",\n            network=self.network,\n            topic=self.ros_topic).inc()\n\n    def on_announcement(self):\n        if self.latched_msg:\n            self.add_to_queue(self.latched_msg)\n\n    def add_to_queue(self, serialized):\n        self.publish_queue.put_nowait(serialized)\n        while self.publish_queue.qsize() > self.PUBLISH_MAX_MESSAGES:\n            rospy.logwarn_throttle(\n                self.WARN_THROTTLE_SECONDS,\n                'dropping message(s?) on %s due to full buffer' % self.ros_topic)\n            # drop the oldest message and put the new message again\n            try:\n                self.publish_queue.get_nowait()\n            except Queue.Empty:\n                pass\n\n\nclass RosAdapter(object):\n    def __init__(self, config, project, robot_name, pubsub_client, network):\n        self.config = config\n        self.project = project\n        self.robot_name = robot_name\n        self.pubsub_client = pubsub_client\n        self.network = network\n\n        self.announcer = None\n        self.topic_adapters = {}\n\n    def start(self):\n        for topic, params in self.config.topics.items():\n            self.add_topic(topic, params)\n\n        self.param_sync = param_sync.ParamSync(\n            self.project, self.robot_name, self.config.params)\n\n        self.announcer = announce.Announcer(\n            self.robot_name,\n            self.pubsub_client,\n            self.network,\n            self.on_announcement)\n\n        # Announce to ourself to avoid a race condition between initial publishing\n        # and subscribing to announcements.\n        self.on_announcement(self.network)\n\n    def add_topic(self, topic, params):\n        if topic not in self.topic_adapters:\n            self.topic_adapters[topic] = TopicAdapter(\n                self, self.robot_name, self.pubsub_client, self.network, topic, params)\n\n    def on_announcement(self, new_network):\n        rospy.loginfo('received announcement from %r', new_network)\n\n        for topic_adapter in self.topic_adapters.values():\n            topic_adapter.on_announcement()\n\n        self.param_sync.cloud_params.poll()\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Tunnel ROS over Pub/Sub.')\n    parser.add_argument('network',\n                        help='Unique ID for the network this adapter runs'\n                             ' on, eg robot, cloud, dev')\n    args = parser.parse_args()\n\n    try:\n        prometheus_client.start_http_server(80)\n    except BaseException:\n        rospy.loginfo('failed to bind prometheus HTTP server to port 80')\n\n    config = Config(os.path.join(SCRIPT_DIR, CONFIG_PATH))\n\n    rospy.loginfo('connecting to ros master ' + os.environ['ROS_MASTER_URI'])\n    rospy.init_node('ros_adapter_' + args.network)\n\n    project = os.environ['GOOGLE_CLOUD_PROJECT']\n    robot_name = os.environ['ROBOT_NAME']\n    pubsub_client = pubsub.Client()\n\n    ros_adapter = RosAdapter(\n        config,\n        project,\n        robot_name,\n        pubsub_client,\n        args.network)\n    ros_adapter.start()\n\n    rospy.loginfo('ros-adapter has started')\n    rospy.spin()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"src/python/ros_adapter/ros_adapter.py","file_name":"ros_adapter.py","file_ext":"py","file_size_in_byte":11673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
pygame.K_DOWN:\n player_YRchange = 0.3\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_w:\n player_YLchange = -0.3\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_s:\n player_YLchange = 0.3\n #\n # if event.type == pygame.KEYDOWN:\n # if event.key == pygame.K_SPACE:\n # ball_Xchange = -0.2\n # ball_Ychange = deltaY\n\n playerleft(playerXL, playerYL)\n playerYL += player_YLchange\n if playerYL < 0:\n playerYL = 0\n elif playerYL > 344:\n playerYL = 344\n\n playerright(playerXR, playerYR)\n playerYR += player_YRchange\n if playerYR < 0:\n playerYR = 0\n elif playerYR > 344:\n playerYR = 344\n\n ball(ballX, ballY)\n ballX += ball_Xchange\n ballY += ball_Ychange\n if (ballX < 0 and ballX > -1) and (ballY>playerYL and ballY < (playerYL+256)):\n ball_Xchange = -ball_Xchange\n if (ballX > 570 and ballX < 571) and (ballY>playerYR and ballY < (playerYR+256)):\n ball_Xchange = -ball_Xchange\n if ballY < 0:\n ball_Ychange = -ball_Ychange\n if ballY > 570:\n ball_Ychange = -ball_Ychange\n\n pygame.display.update()","sub_path":"table tennis/twoplayer.py","file_name":"twoplayer.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"594778830","text":"import torch\nimport torch.utils.data\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nimport numpy as np\nimport pdb\n\nclass siameseCVAE(nn.Module):\n\tdef __init__(self,batch=4):\n\t\tsuper().__init__()\n\t\td = 0.4\n\t\tself.z_size = 64\n\t\tself.small = 256\n\t\tself.hidden = 1024\n\t\tch_sz = 1\n\t\tc1 = 64\n\t\tc2 = 16\n\t\tlast_conv = 4\n\t\tself.tensor = (batch,last_conv,150,200)\n\t\tflat = np.prod(self.tensor)\n\t\tflat2 = flat*2\n\n\t\t# channel_in, c_out, kernel_size, stride, padding\n\t\tdef convbn(ci,co,ksz,s=1,pz=0):\t\t#ReLu nonlinearity\n\t\t\treturn nn.Sequential(\n\t\t\t\tnn.Conv2d(ci,co,ksz,stride=s,padding=pz),\n\t\t\t\tnn.ReLU(),\n\t\t\t\tnn.BatchNorm2d(co))\n\t\tdef convout(ci,co,ksz,s=1,pz=0):\t#Sigmoid nonlinearity\n\t\t\treturn nn.Sequential(\n\t\t\t\tnn.Conv2d(ci,co,ksz,stride=s,padding=pz),\n\t\t\t\tnn.Sigmoid(),\n\t\t\t\tnn.BatchNorm2d(co))\n\t\tdef mlp(in_size,hidden):\n\t\t\treturn nn.Sequential(\n\t\t\t\tnn.Dropout(d),\n\t\t\t\tnn.Linear(in_size,hidden),\n\t\t\t\tnn.ReLU())\n\n\t\t#Encoder NN\n\t\tself.encx = nn.Sequential(\n\t\t\t\tnn.Dropout(d),\n\t\t\t\tconvbn(ch_sz,c1,3,1,1),\n\t\t\t\tconvbn(c1,c2,3,1,1),\n\t\t\t\tconvbn(c2,last_conv,3,1,1))\n\t\tself.ency = nn.Sequential(\n\t\t\t\tnn.Dropout(d),\n\t\t\t\tconvbn(ch_sz,c1,3,1,1),\n\t\t\t\tconvbn(c1,c2,3,1,1),\n\t\t\t\tconvbn(c2,last_conv,3,1,1))\n\t\tself.m1 = nn.Sequential(\n\t\t\t\tnn.Dropout(d),\n\t\t\t\tmlp(flat2,self.hidden),\n\t\t\t\tmlp(self.hidden, self.small))\n\t\tself.zmean = nn.Linear(self.small,self.z_size)\n\t\tself.zlogvar = nn.Linear(self.small,self.z_size)\n\n\t\t#Decoder NN\n\t\tself.expand_z = nn.Linear(self.z_size,self.small)\n\t\tself.mx = nn.Sequential(\n\t\t\t\tnn.Dropout(d),\n\t\t\t\tmlp(self.small,self.hidden),\n\t\t\t\tmlp(self.hidden,flat))\n\t\tself.my = nn.Sequential(\n\t\t\t\tnn.Dropout(d),\n\t\t\t\tmlp(self.small,self.hidden),\n\t\t\t\tmlp(self.hidden,flat))\n\t\tself.decx = nn.Sequential(\n\t\t\t\tnn.Dropout(d),\n\t\t\t\tconvbn(last_conv,c2,3,1,1),\n\t\t\t\tconvbn(c2,c1,3,1,1),\n\t\t\t\tconvout(c1,ch_sz,3,1,1))\n\t\tself.decy = nn.Sequential(\n\t\t\t\tnn.Dropout(d),\n\t\t\t\tconvbn(last_conv,c2,3,1,1),\n\t\t\t\tconvbn(c2,c1,3,1,1),\n\t\t\t\tconvout(c1,ch_sz,3,1,1))\n\n\tdef encoder(self, x, 
y):\n\t\t# Flatten enc output\n\t\th_x = self.encx(x).view(-1)\n\t\th_y = self.ency(y).view(-1)\n\t\t# Concatenate flat convs\n\t\th_layer = torch.cat((h_x,h_y))\n\t\th = self.m1(h_layer)\n\t\treturn h\n\n\tdef bottleneck(self, x):\n\t\tz_mean = self.zmean(x)\n\t\tz_logvar = self.zlogvar(x)\n\t\t#reparam to get z latent sample\n\t\tstd = torch.exp(0.5*z_logvar)\n\t\teps = torch.randn_like(std)\n\t\tz = z_mean + eps*std\n\t\treturn z, z_mean, z_logvar\n\n\tdef decoder(self, z):\n\t\t#check the nonlinearities of this layer\n\t\th = self.expand_z(z)\n\t\t#exand z to each decoder head\n\t\th_x = self.mx(h)\n\t\th_y = self.my(h)\n\t\t#make sure to reshape data correctly and decode\n\t\tx = self.decx(h_x.view(self.tensor))\n\t\ty = self.decy(h_x.view(self.tensor))\n\t\treturn x, y\n\n\tdef forward(self, x, y):\n\t\th = self.encoder(x, y)\n\t\tz, z_mean, z_logvar = self.bottleneck(h)\n\t\tx_hat, y_hat = self.decoder(z)\n\t\treturn x_hat, y_hat, z, z_mean, z_logvar\n\n\tdef encode_get_z(self, x, y):\n\t\th = self.encoder(x, y)\n\t\tz, z_mean, z_logvar = self.bottleneck(h)\n\t\treturn z, z_mean, z_logvar","sub_path":"model/siameseCVAE.py","file_name":"siameseCVAE.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"577891044","text":"class CommentEntity:\n \"\"\"Comment entity\"\"\"\n\n __slots__ = (\n 'id',\n 'first_name',\n 'second_name',\n 'last_name',\n 'phone',\n 'email',\n 'text',\n 'city_id',\n )\n\n def __init__(self, _first_name: str, _last_name: str, _text: str, _city_id: int = None, \n _second_name: str = None, _phone: str = None, _email: str = None, _id: int = None):\n \"\"\"\n Конструктор\n Args:\n _id: ID Комментария\n _first_name: Имя\n _second_name: Отчество\n _last_name: Фамилия\n _phone: Телефон\n _email: Email\n _text: Текст\n _city_id: id города\n \"\"\"\n self.id = _id\n self.first_name = _first_name\n self.second_name = _second_name\n self.last_name = _last_name\n self.phone = _phone\n self.email = _email\n self.text = _text\n self.city_id = _city_id\n","sub_path":"entities/comment.py","file_name":"comment.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"517747888","text":"from django. urls import path, include\nfrom . import views\nfrom rest_framework import routers\n\n\n# we will use router + viewset combination to handle the GET,POST,DELETE,PATCH request in just one view\nrouter = routers.DefaultRouter()\nrouter.register('choices', views.ChoiceViewSet)\n\n\nurlpatterns = [\n path('polls/list/', views.PollList.as_view(), name='poll_list_api'),\n path('polls//', views.PollDetail.as_view(), name='poll_detail_api'),\n path('rest-auth/', include('rest_auth.urls')),\n # include authentications. 
{"seq_id":"517747888","text":"from django.urls import path, include\nfrom . import views\nfrom rest_framework import routers\n\n\n# we will use the router + viewset combination to handle the GET, POST, DELETE and PATCH requests in just one view\nrouter = routers.DefaultRouter()\nrouter.register('choices', views.ChoiceViewSet)\n\n\nurlpatterns = [\n path('polls/list/', views.PollList.as_view(), name='poll_list_api'),\n path('polls/<int:pk>/', views.PollDetail.as_view(), name='poll_detail_api'),\n path('rest-auth/', include('rest_auth.urls')),\n # includes the rest_auth authentication endpoints (login/logout)\n path('api-auth/', include('rest_framework.urls')),\n # includes the browsable-API login link at the interface\n path('users/list/', views.UserList.as_view(), name='user_list_api_view'),\n path('', include(router.urls)),\n # includes all the router-generated choice URLs (choices/list, choices/2, ...) in a single view\n]","sub_path":"api_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"143558111","text":"from rest_framework import serializers\n\nfrom .models import Empresa\n\n\nclass EmpresaSerializer(serializers.ModelSerializer):\n to_string = serializers.SerializerMethodField()\n\n def get_to_string(self, instance): # pragma: no cover\n return instance.nombre\n\n class Meta:\n model = Empresa\n fields = [\n 'url',\n 'id',\n 'nombre',\n 'to_string',\n 'nit',\n ]\n","sub_path":"empresas/api_serializers.py","file_name":"api_serializers.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"256424657","text":"from django.utils import timezone\nfrom django.conf import settings\n\nTIMEZONE_KEY = getattr(settings, 'TIMEZONE_KEY', 'django_timezone')\n\nclass NginxMiddleware(object):\n \"\"\"\n Fixes REMOTE_ADDR for Nginx/Apache2 configs only.\n \"\"\"\n\n def process_request(self, request):\n request.META['REMOTE_ADDR'] = request.META.get(\n 'HTTP_X_FORWARDED_FOR', request.META.get(\n 'HTTP_X_REAL_IP', request.META.get('REMOTE_ADDR', None)))\n\n\nclass TimezoneMiddleware(object):\n \"\"\"\n Activates the session's timezone if one is available.\n \"\"\"\n\n def process_request(self, request):\n \"\"\"\n Set the key in settings; by default TIMEZONE_KEY = 'django_timezone'.\n It can resolve nested keys like session['geoip']['time_zone'] => 'geoip.time_zone'.\n \"\"\"\n\n store = request.session\n for key in TIMEZONE_KEY.split('.'):\n store = store.get(key, None)\n if store is None: # stop early if a nested key is missing\n break\n if store and store != request.session:\n timezone.activate(store)\n","sub_path":"djutils/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"313246926","text":"'''\nWrite a function every_nth that takes one string and one integer n as input\nand returns a string containing every nth character of the original string,\nstarting with the first character. You can use len() and the membership\noperator in but no other string functions.
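(Note: a slice such as text[::n] would collect every nth character in one step, but this exercise wants the result built with an explicit loop.)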
You should have loops!\n\nFor example:\n\nTest\tResult\nprint(every_nth(\"this is a test 1 with n = 2\",2))\nti sats ihn=2\nprint(every_nth(\"t\",4))\n'''\ndef every_nth(par_str, par_int):\n new_string = ''\n for i in range(0, len(par_str), par_int):\n new_string += par_str[i]\n return new_string\n ","sub_path":"Wk4_June_11-17/LAB_7/LAB7_Q2.py","file_name":"LAB7_Q2.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"229964219","text":"import random\r\n\r\nbeginning = ['he ', 'she ', 'it ', 'there ', 'the ', 'a ', 'an ', 'in a land ', 'in a land far far away ', 'when the world was full of wonders, ', 'miles away ', 'in a kingdom ']\r\n\r\nclass Markov(object):\r\n\r\n def __init__(self, order):\r\n self.order = order\r\n self.group_size = self.order + 1\r\n self.text = None\r\n self.graph = {}\r\n return\r\n\r\n def train(self, filename):\r\n self.text = open(filename).read().split() # open() instead of the Python-2-only file()\r\n self.text = self.text + self.text [: self.order]\r\n\r\n for i in range(0, len (self.text) - self.group_size):\r\n key = tuple (self.text [i:i + self.order])\r\n value = self.text [i + self.order]\r\n if key in self.graph:\r\n self.graph[key].append(value)\r\n else:\r\n self.graph[key] = [value]\r\n return\r\n\r\n def generate(self,length):\r\n index = random.randint (0,(len(self.text) - self.order))\r\n result = self.text[index: index + self.order]\r\n\r\n for i in range (length):\r\n state = tuple (result[len(result) - self.order:])\r\n next_word = random.choice(self.graph[state])\r\n result.append(next_word)\r\n\r\n for i in range (length):\r\n state = tuple (result[len(result) - self.order:])\r\n first_word = random.choice(self.graph[state])\r\n result.append(first_word)\r\n\r\n # function to remove non-ascii characters; doesn't work either (no idea how to do this properly)\r\n # def remove_non_ascii(text):\r\n # return ''.join(i for i in text if ord(i)<128)\r\n\r\n first = random.choice(beginning)\r\n\r\n text = \" \".join (result [self.order:])\r\n finaltext = \"Once upon a time \" + first + text + \" and they lived happily ever after.\"\r\n return finaltext\r\n\r\n\r\ndef fairy():\r\n fairytale = Markov(3)\r\n fairytale.train('fairytales2.txt')\r\n fairytale.generate(98)\r\n\r\n\r\n# Code written with help of Omer Nevo's instruction on Generative Poetry\r\n#http://il.pycon.org/2016/static/sessions/omer-nevo.pdf\r\n","sub_path":"Fairytale/Archiv/generator1.py","file_name":"generator1.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"77531042","text":"#20CS60R56\n#VINEETH KUMAR BALAPANURU\n# IMPLEMENTED BOTH TASK1 AND TASK2 IN THIS PROGRAM ITSELF\n\nimport urllib.request, urllib.error, urllib.parse\nimport ply.lex as lex\nimport ply.yacc as yacc\nimport re\nimport os\nimport sys\n\ntokens = (\n 'MOVIENAMEATTR' , 'MOVIENAME' ,\n 'MOVIEDESCRIPATTR' , 'MOVIEDESCRIP' ,\n 'DIRECTORATTR' , 'PRODUCERATTR' , 'WRITERATTR' ,\n 'OGLANGATTR' , 'OGLANG',\n 'CASTATTR' , 'CAST' , 'ROLE' , 'CASTREF',\n 'BOXOFFICEATTR' , 'BOXOFFICE' ,\n 'RUNTIMEATTR' , 'RUNTIME' ,\n 'GENREATTR' , 'GENRE' ,\n 'SIMMOVIEATTR','SIMMOVIE' , 'SIMMOVIEREF' ,\n 'BDAYATTR' , 'BDAY' , 'HRATEDATTR' , 'LRATEDATTR' ,\n 'MOVIE' , 'MLISTATTR' , 'MLIST' ,\n 'WHERETOATTR' , 'WHERETO' ,\n 'CELEBRITY' , 'END'\n )\n\n\n# Ignored characters\nt_ignore = \"\\r\"\n\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += t.value.count(\"\\n\")\n\ndef t_error(t):\n 
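# PLY calls t_error on an unrecognized character; skipping one character lets the lexer keep scanning.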
t.lexer.skip(1)\n\n#------------------------- MOVIE NAME LEXICONS ----------------------------------\ndef t_MOVIENAMEATTR(t):\n r''\n t.value = 'name'\n return t\n\n\ndef t_MOVIENAME(t):\n r'([^<]*)'\n t.value = (t.value)[7:-18]\n return t\n\n#------------------------- MOVIE DESCRIPTION LEXICONS ----------------------------------\n\n# MOVIEDESCRIP START IS SAME AS MOVIENAMEEND\n\ndef t_MOVIEDESCRIPATTR(t):\n r''\n t.value = 'story'\n return t\n\ndef t_MOVIEDESCRIP(t):\n r']*)'\n t.value = (t.value)[34:-1]\n return t\n#------------------------- ORIGINAL LANGUAGE DESCRIPTION LEXICONS ----------------------------------\ndef t_OGLANGATTR(t):\n r'Original\\ Language:'\n t.value = 'language'\n return t\n\ndef t_OGLANG(t):\n r'[A-Za-z][A-Za-z]([A-Za-z\\s\\(\\)]+)\\n'\n s = (t.value).find('>')\n t.value = (t.value)[s+1:-1]\n return t\n#------------------------- DIRECTOR DESCRIPTION LEXICONS ----------------------------------\n\ndef t_DIRECTORATTR(t):\n r'Director:'\n t.value = 'director'\n return t\n\n\n#DIRECTOREND IS SAME AS PRODUCERSTART\n\n#------------------------- PRODUCER DESCRIPTION LEXICONS ----------------------------------\n\ndef t_PRODUCERATTR(t):\n r'Producer:'\n t.value = 'producer'\n return t\n\n#PRODUCEREND IS SAME AS WRITERSTART\n\n#------------------------- WRITER DESCRIPTION LEXICONS ----------------------------------\n\ndef t_WRITERATTR(t):\n r'Writer:'\n t.value = 'writer'\n return t\n\n#------------------------- CAST & CREW DESCRIPTION LEXICONS ----------------------------------\n\ndef t_CASTATTR(t):\n r'Cast\\ &\\ Crew'\n t.value = 'cast'\n return t\n\ndef t_CASTREF(t):\n r''\n t.value = (t.value).replace('' , '' )\n return t\n\ndef t_CAST(t):\n r']+)'\n t.value = (t.value).replace('([\\n\\s]+)([A-Za-z\\(\\)\\s-]+)([\\s\\n]+)((
 )*)([\s\n]*)
([\\s\\n]+)([\\s\\n]*)'\n t.value = t.value.replace('\\n' , \"\").replace(\" \" , \"\").replace('
' , \"\").replace('' , \"\").replace('' , \"\")\n return t\n\n#------------------------- BOX OFFICE LEXICONS ----------------------------------\ndef t_BOXOFFICEATTR(t):\n r'Box\\ Office\\ \\(Gross\\ USA\\):'\n t.value = 'boxoffice'\n return t\n\ndef t_BOXOFFICE(t):\n r'\\$([0-9,.]+)([KMB]*)'\n t.value = (t.value)[56:-6]\n return t\n#------------------------- RUNTIME LEXICONS ----------------------------------\ndef t_RUNTIMEATTR(t):\n r'Runtime:'\n t.value = 'runtime'\n return t\n\ndef t_RUNTIME(t):\n r'\\n(\\s+)([A-Za-z0-9\\s]+)'\n x = t.value.split('\\n')\n t.value = x[1].strip()\n return t\n\n#------------------------- GENRE LEXICONS ----------------------------------\ndef t_GENREATTR(t):\n r'Genre:'\n t.value = 'genre'\n return t\n\ndef t_GENRE(t):\n r'([A-Za-z,\\n\\s]+)'\n t.value = (t.value)[63:-7].replace(' ' , '').replace('\\n' , '')\n return t\n\n#-------------------------CELEB LEXICONS---------------------------------------\ndef t_BDAYATTR(t):\n r''\n t.value = 'bday'\n return t\n\ndef t_BDAY(t):\n r'Birthday:\\n(\\s+)([A-Za-z0-9,\\s]+)'\n t.value = t.value.replace('Birthday:\\n' , '').strip()\n return t\n\ndef t_HRATEDATTR(t):\n r'Highest\\ Rated:'\n t.value = 'hrated'\n return t\n\ndef t_LRATEDATTR(t):\n r'Lowest\\ Rated:'\n t.value = 'lrated'\n return t\n\ndef t_MOVIE(t):\n r'\\n(\\s+)([A-Za-z0-9\\(\\)\\s,.-]+)\\n(\\s+)
'\n idx = t.value.find('>')\n t.value = (t.value)[idx+1 : -4].strip()\n return t\n\ndef t_MLISTATTR(t):\n r'Filmography'\n t.value = 'mlist'\n return t\n\ndef t_MLIST(t):\n r''\n s = (t.value).find('>')\n t.value = (t.value)[s+1:-4]\n return t\n\ndef t_SIMMOVIEATTR(t):\n r'You\\ might\\ also\\ like'\n t.value = 'ymal'\n return t\n\ndef t_SIMMOVIEREF(t):\n r''\n t.value = (t.value)[9:-45]\n return t\n\ndef t_SIMMOVIE(t):\n r'([A-Za-z0-9,\\(\\)\\s:\\'\".-]+)'\n t.value = (t.value)[63:-7]\n return t\n\n\ndef t_WHERETOATTR(t):\n r'Where\\ to\\ watch'\n t.value = 'wheretowatch'\n return t\n\ndef t_WHERETO(t):\n r'data-affiliate=\"([A-Za-z-]+)\"'\n t.value = (t.value)[16:-1].replace('-' , ' ')\n return t\n\ndef t_END(t):\n r''\n t.value = 'end'\n return t\n\n#--------------------------------GRAMMAR PRODUCTIONS -------------------------------\n\ndef p_start(p):\n '''start : pair start\n | END\n '''\n\ndef p_pair(p):\n '''pair : attribute value'''\n\n attr.append(p[1])\n attrval.append(p[2])\n\ndef p_attribute(p):\n '''attribute : WRITERATTR\n | DIRECTORATTR\n | PRODUCERATTR\n | MOVIENAMEATTR\n | MOVIEDESCRIPATTR\n | RUNTIMEATTR\n | BOXOFFICEATTR\n | CASTATTR\n | OGLANGATTR\n | GENREATTR\n | SIMMOVIEATTR\n | HRATEDATTR\n | LRATEDATTR\n | BDAYATTR\n | MLISTATTR\n | WHERETOATTR\n '''\n p[0] = p[1]\n\ndef p_value(p):\n '''value : celeb\n | castnchar\n | val\n | refnmovie\n | mlistrec\n | where\n '''\n p[0] = p[1]\n\ndef p_castnchar(p):\n '''castnchar : castnchar CASTREF CAST ROLE\n | castnchar CAST ROLE\n | CAST ROLE\n | CASTREF CAST ROLE\n '''\n if len(p) > 4:\n p[0] = p[1] + [(p[2] , p[3] , p[4])]\n elif len(p) > 3 and type(p[1]) == str:\n p[0] = [(p[1] , p[2] , p[3])]\n elif len(p) > 3:\n p[0] = p[1] + [('' , p[2] , p[3])]\n else:\n p[0] = [('' , p[1] , p[2])]\n\ndef p_celeb(p):\n '''celeb : celeb CELEBRITY\n | CELEBRITY\n '''\n if len(p) > 2:\n p[0] = p[1] + [p[2]]\n else:\n p[0] = [p[1]]\n\ndef p_val(p):\n '''val : MOVIENAME\n | MOVIEDESCRIP\n | OGLANG\n | BOXOFFICE\n | RUNTIME\n | GENRE\n | MOVIE\n | BDAY CELEBRITY\n '''\n p[0] = p[1]\n\ndef p_where(p):\n '''where : where WHERETO\n | WHERETO\n '''\n if len(p) > 2:\n p[0] = p[1] + [p[2]]\n else:\n p[0] = [p[1]]\n\ndef p_refnmovie(p):\n '''refnmovie : refnmovie SIMMOVIEREF SIMMOVIE\n | SIMMOVIEREF SIMMOVIE\n '''\n if len(p) == 4:\n p[0] = p[1] + [(p[2] , p[3])]\n else:\n p[0] = [(p[1] , p[2])]\n\ndef p_mlistrec(p):\n '''mlistrec : mlistrec MLIST\n | MLIST\n '''\n if len(p) > 2:\n lst = []\n sidx = p[2].find('data-title')\n eidx = p[2].find('data-boxoffice')\n lst.append(p[2][sidx+12 : eidx-2].replace('\\n' , '').strip())\n sidx = p[2].find('data-year')\n lst.append(p[2][sidx+11:-1])\n p[0] = p[1] + [lst]\n\n else:\n lst = []\n sidx = p[1].find('data-title')\n eidx = p[1].find('data-boxoffice')\n lst.append(p[1][sidx+12 : eidx-1].replace('\\n' , '').strip())\n sidx = p[1].find('data-year')\n lst.append(p[1][sidx+11:-1])\n p[0] = [lst]\n\ndef p_error(p):\n pass\n#--------------------------------RUNNING THE PARSER-----------------------------------\n\n\ndef download_page(url , fname):\n url = \"https://www.rottentomatoes.com\" + url\n print(url)\n response = urllib.request.urlopen(url)\n webContent = response.read()\n f = open(fname , 'wb')\n f.write(webContent)\n f.close()\n return fname\n\n\ndef main():\n global attr , attrval\n attr = []\n attrval = []\n Queries = {'you might also like':'ymal' ,\n 'where to watch':'wheretowatch' ,\n 'birthday':'bday' ,\n 'highest rated movie':'hrated' ,\n 'lowest rated movie':'lrated' ,\n 'story' : 'story' ,\n 
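 # (each key below is the phrase the user types at the prompt; the value is the internal attribute tag looked up in the parsed attr list)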
'name' : 'name' ,\n 'language':'language' ,\n 'genre':'genre' ,\n 'box office':'boxoffice' ,\n 'runtime' : 'runtime' ,\n 'cast' : 'cast' ,\n 'director' : 'director' ,\n 'writer' : 'writer' ,\n 'producer' : 'producer' ,\n 'other movies' : 'othermovies' ,\n 'quit' : 'quit'\n }\n #result = {'name' , 'story' , 'writer' , 'director' , 'producer' , 'runtime' , 'boxoffice' , 'cast' , 'language'}\n fp = open(\"moviefile.html\")\n data = fp.read()\n lexer = lex.lex()\n parser = yacc.yacc()\n parser.parse(data)\n os.remove('moviefile.html')\n\n # for i in range(len(attr)):\n # print(attr[i] , attrval[i])\n\n\n exit = False\n while not exit:\n query = input(\"What details you want:\")\n try:\n query = Queries[query]\n except:\n print('Requested Query not available!')\n continue\n\n if query.lower() == 'quit':\n exit = True\n elif query.lower() == 'ymal':\n try:\n idx = attr.index('ymal')\n mlist = []\n for i in range(len(attrval[idx])):\n print(str(i+1) + '. ' + attrval[idx][i][1])\n mlist.append(attrval[idx][i][1])\n quer = input('Enter a movie name:')\n try:\n midx = mlist.index(quer)\n url = attrval[idx][midx][0]\n fp = open(download_page(url , 'moviefile.html'))\n attr = []\n attrval = []\n data = fp.read()\n lexer = lex.lex()\n parser = yacc.yacc()\n parser.parse(data)\n os.remove('moviefile.html')\n except:\n print('Movie not available')\n except:\n print('Not Available!')\n\n elif query.lower() == 'cast':\n idx = attr.index('cast')\n clist = []\n for i in range(len(attrval[idx])):\n print(str(i+1) + '. ' + attrval[idx][i][1])\n clist.append(attrval[idx][i][1])\n quer = input('know about which cast member:')\n cidx = clist.index(quer)\n if cidx == -1:\n print('No cast member present with given name')\n else:\n url = attrval[idx][cidx][0]\n download_page(url , 'castfile.html')\n data = open('castfile.html').read()\n attr = []\n attrval = []\n lexer = lex.lex()\n parser = yacc.yacc()\n parser.parse(data)\n os.remove('castfile.html')\n\n #for i in range(len(attr)):\n #print(attr[i] , attrval[i])\n\n\n cexit = False\n\n while not cexit:\n quer = input('What is the query?')\n\n try:\n quer = Queries[quer]\n except:\n print('Requested Query not available!')\n continue\n\n if quer == 'othermovies':\n year = input('year?')\n i=1\n year = int(year)\n midx = attr.index('mlist')\n for movie in attrval[midx]:\n if int(movie[1]) >= year:\n i += 1\n print(str(i)+'. 
'+movie[0])\n\n elif quer == 'quit':\n cexit = True\n exit = True\n\n else:\n try:\n idx = attr.index(quer)\n print(attr[idx] + ' ' + attrval[idx])\n except:\n print('Not available')\n\n\n elif query.lower() in attr:\n try:\n idx = attr.index(query)\n print(query + ':' , attrval[idx])\n except:\n print(query + ' information is not available')\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"code2.py","file_name":"code2.py","file_ext":"py","file_size_in_byte":14949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"41216833","text":"import glob \nfrom collections import defaultdict\nfrom string import punctuation\nspecial = list(set(punctuation)) # special characters which we need to remove\npath = '/home/vishal97/IRtest/*.txt' #change it to your path!\nfiles=glob.glob(path)\ninverted= defaultdict(lambda: defaultdict(set))\nfreq = defaultdict(int) # dicitonary with the frequency of each word\nfileNames = []\ndef Set(d):\n\ts = set()\n\tfor i in d:\n\t\ts.add(i)\n\treturn(s)\ndef removeSpecial(word): # to remove special characters, there could be a more optimal way to do it\n\tnewWord = word\n\tfor i in word:\n\t\tif(i in special):\n\t\t\tnewWord = newWord.replace(i, \"\") #removing the special character if present in the string\n\treturn(newWord)\ndef toLower(word): # making the words case insensitive\n\tnewWord = word.lower()\n\treturn(newWord)\ndef parse(string):\n\ti = string.rfind('/')\n\treturn(string[i+1:])\ndef search(string):\n\tfor i in inverted[string]:\n\t\tprint(i,\":\", fileNames[i-1])\ndef intersectQ(string):\n\tl = string.split()\n\tfirst = l[0]\n\tsecond = l[-1]\n\ta = Set(inverted[first])\n\tb = Set(inverted[second])\n\tinterSet = a.intersection(b)\n\tfor i in interSet:\n\t\tprint(i,\":\", fileNames[i-1])\ndef unionQ(string):\n\tl = string.split()\n\tfirst = l[0]\n\tsecond = l[-1]\n\ta = Set(inverted[first])\n\tb = Set(inverted[second])\n\tinterSet = a.union(b)\n\tfor i in interSet:\n\t\tprint(i,\":\", fileNames[i-1])\nfor i in range(len(files)):\n\tf=open(files[i], 'r')\n\tfileNames.append(parse(files[i]))\n\tj = 0\n\tfor words in f.read().split(): #split by default splits it by space\n\t\tif(words in inverted):\n\t\t\tfreq[words]+=1 # increase the value of the word which is a key by 1 when it's found\n\t\t\tinverted[words][i+1].add(j) # this adds the docid which is (i+1) and to it's set it adds the position\n\t\t\tj+=1\n\t\telse:\n\t\t\twords = removeSpecial(words)\n\t\t\twords = toLower(words)\n\t\t\tfreq[words]+=1 # increase the value of the word which is a key by 1 when it's found\n\t\t\tinverted[words][i+1].add(j)\n\t\t\tj+=1\n\tf.close()\n\nprint(\"Query for any AND any type\")\nintersectQ(\"snake AND frog\")\nprint()\nprint(\"Query for any OR any type\")\nunionQ(\"snake OR frog\")\nprint()\nprint(\"Query for a string\")\nsearch(\"snake\")\nprint()\n\n\n \n","sub_path":"invertedI.py","file_name":"invertedI.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"43831219","text":"import torch\nfrom pytorchyolo import models\n\nfrom nni.compression.pytorch import ModelSpeedup\nfrom nni.algorithms.compression.pytorch.pruning import L1FilterPruner, LevelPruner\nfrom nni.compression.pytorch.utils import not_safe_to_prune\n\n# The Yolo can be downloaded at https://github.com/eriklindernoren/PyTorch-YOLOv3.git\nprefix = '/home/user/PyTorch-YOLOv3' # replace this path with yours\n# Load the YOLO model\nmodel = 
models.load_model(\n \"%s/config/yolov3.cfg\" % prefix, \n \"%s/yolov3.weights\" % prefix).cpu()\nmodel.eval()\ndummy_input = torch.rand(8, 3, 320, 320)\nmodel(dummy_input)\n# Generate the config list for pruner\n# Filter the layers that may not be able to prune\nnot_safe = not_safe_to_prune(model, dummy_input)\ncfg_list = []\nfor name, module in model.named_modules():\n if name in not_safe:\n continue\n if isinstance(module, torch.nn.Conv2d):\n cfg_list.append({'op_types':['Conv2d'], 'sparsity':0.6, 'op_names':[name]})\n# Prune the model\npruner = L1FilterPruner(model, cfg_list)\npruner.compress()\npruner.export_model('./model', './mask')\npruner._unwrap_model()\n# Speedup the model\nms = ModelSpeedup(model, dummy_input, './mask')\n\nms.speedup_model()\nmodel(dummy_input)\n\n","sub_path":"examples/model_compress/pruning/legacy/speedup/speedup_yolov3.py","file_name":"speedup_yolov3.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"615096576","text":"import time\nfrom brteve.brt_eve_bt817_8 import BrtEve\n\nMIN_TOUCH = 14\nMIN_MOVE = 0.1 # pixel / milis\nMAX_MOVE = 2 # pixel / milis\nMAX_DELAY = 1000 # milisecond\n\n# return value is the number of milliseconds that have elapsed since the system was started.\ndef milis():\n return round(time.monotonic_ns() / 1000_000)\n \nclass helper_scroller():\n def __init__(self, limit_top = 0, limit_bottom = 0, speed=1, friction=0.9) -> None:\n self._limit_t = limit_top\n self._limit_b = limit_bottom\n self._speed = speed\n self._friction = friction\n\n self.last_xy = 0\n self.last_milis_touch = 0\n self.last_milis = 0\n\n self._last_offset = 0\n self._last_velocity = 0\n self._back_to_top = 0\n self._back_to_bottom = 0\n self._last_no_touch = 0\n\n def set_limit(self, top, bottom):\n self._limit_t = top\n self._limit_b = bottom\n\n def set_speed(self, speed):\n self._speed = speed\n\n def set_friction(self, friction):\n self._friction = friction\n\n def stop(self):\n self._last_velocity = 0\n\n def _stop_and_run_back(self):\n padding = 100\n # back to top\n if self._last_offset > self._limit_t + padding:\n self._back_to_top = 1\n self._last_velocity = 0\n if self._back_to_top == 1:\n distance = round(abs(self._last_offset - self._limit_t) / 2)\n if distance < 3:\n self._back_to_top = 0\n self._last_offset = self._limit_t\n else:\n self._last_offset -= distance\n\n # back to bottom\n if self._last_offset < self._limit_b - padding:\n self._back_to_bottom = 1\n self._last_velocity = 0\n if self._back_to_bottom == 1:\n distance = round(abs(self._last_offset - self._limit_b) / 2)\n if distance < 3:\n self._back_to_bottom = 0\n self._last_offset = self._limit_b\n else:\n self._last_offset += distance\n\n def set_offset_vloc(self, offset, vloc):\n self._last_offset = offset\n self._last_velocity = vloc\n\n def get_offset_velocity(self, new_position_xy):\n time = milis()\n delay = max(1, time - self.last_milis_touch)\n\n distance = new_position_xy - self.last_xy\n\n if new_position_xy == 32768: # no touch, friction is in affect\n self._last_velocity = round(self._friction * self._last_velocity, 3)\n if abs(self._speed * self._last_velocity) < 0.02:\n self._last_velocity = 0\n\n self._last_offset += round(self._last_velocity * (time - self.last_milis))\n self._last_no_touch = 1\n self._stop_and_run_back()\n else:\n velocity = distance / delay # v = pixel / milisecond\n self.last_milis_touch = time\n\n if self._last_no_touch == 1 or delay > MAX_DELAY: # This is a 
single touch\n self.last_xy = new_position_xy\n self._back_to_top = 0\n self._back_to_bottom = 0\n self._last_velocity = 0\n else: # this is a swipe\n self._last_velocity = velocity\n self.last_xy = new_position_xy\n self._last_offset += round(self._last_velocity * delay)\n self._last_no_touch = 0\n\n self.last_milis = time\n return self._last_offset, self._last_velocity\n","sub_path":"circuitPython/examples/audio-playback/audio_playback/helper_scroller.py","file_name":"helper_scroller.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"543011407","text":"from urllib.parse import urlencode, urlparse, parse_qs\nfrom lxml.html import fromstring\nfrom requests import get\nfrom bs4 import BeautifulSoup\nimport nltk\nimport requests\nimport re\nimport heapq\nfo=\"\"\ndef RankUCal(input_msg):\n target_text=str(input_msg)\n fo=\"\"\n for n in range(2):\n target_url=\"https://www.google.com/search?q=\"+str(target_text)+\"&tbm=nws&start=\"+str(n)\n raw = get(target_url).text\n page = fromstring(raw)\n for result in page.cssselect(\".r a\"):\n url = result.get(\"href\")\n if url.startswith(\"/url?\"):\n url = parse_qs(urlparse(url).query)['q']\n fo=fo+str(url[0])\n response = requests.get(url[0])\n soup = BeautifulSoup(response.content, \"html.parser\")\n links = soup.findAll(\"p\")\n fo=fo+\"\\n\\n\"+re.sub('<[^>]+>', '',(str(links)).lower())\n \n #NE Tagging Target Text, Important Words Shortlisted \n namedEnt = nltk.pos_tag(nltk.word_tokenize(target_text))\n named_entities = []\n search_query = []\n for x in namedEnt:\n if x[1] == 'NNP':\n named_entities.append(x[0])\n i=0\n \n while(True):\n try:\n search_query.append(str(named_entities[i][0][0]))\n except Exception as e:\n break\n i+=1\n \n k=0\n count=[0]*len(search_query)\n while(k rect.left) and (x < rect.right) and (y > rect.top) and (y < rect.bottom):\r\n\t\treturn True\r\n\r\ndef isCollision(drop,playerTab):\r\n\tif ((isPointInsideRect(drop.left, drop.top, playerTab)) or (isPointInsideRect(drop.left, drop.bottom, playerTab)) or (isPointInsideRect(drop.right, drop.top, playerTab)) or (isPointInsideRect(drop.right, drop.bottom, playerTab))):\r\n\t\treturn True\r\n\treturn False\r\n\r\ndef GameOver(killCount):\r\n\tfont = pygame.font.Font('Calibri.ttf', 32)\r\n\ttext = font.render(\"Game Over\", True, RED, BLACK)\r\n\ttextRect = text.get_rect() \r\n\ttextRect.center = (300,200)\r\n\tscreen.blit(text, textRect)\r\n\t\r\n\tfont = pygame.font.Font('Calibri.ttf', 30)\r\n\ttext = font.render(\"Your Score = {}\".format(killCount), True, RED, BLACK)\r\n\ttextRect = text.get_rect() \r\n\ttextRect.center = (300,250)\r\n\tscreen.blit(text, textRect)\r\n\t\r\n\tfont = pygame.font.Font('Calibri.ttf', 28)\r\n\ttext = font.render(\"Press (R) to Rest\", True, RED, BLACK)\r\n\ttextRect = text.get_rect() \r\n\ttextRect.center = (300,300)\r\n\tscreen.blit(text, textRect)\r\n\treturn True\r\n\t\r\ndef drawGame(player,fall,isGameOver):\r\n\tglobal screen,killCount,playerLength\r\n\tpSurface = pygame.Surface((playerLength,playerWidth))\r\n\tpSurface.fill(RED)\r\n\tscreen.blit(catImg,player)\r\n\tcollisionDetected = False\r\n\tnextFall = []\r\n\t\r\n\tfor drop in fall:\r\n\t\tdSurface = pygame.Surface(fallSize)\r\n\t\tdSurface.fill(WHITE)\r\n\t\tscreen.blit(ballImg,drop)\r\n\t\tif drop[1] < 400:\r\n\t\t\tnextFall.append((drop[0],drop[1]+1))\r\n\t\telse:\r\n\t\t\tif not isGameOver:\r\n\t\t\t\tkillCount += 1\r\n\t\t\tif killCount %50 == 
0:\r\n\t\t\t\tprint(killCount)\r\n\t\tif not collisionDetected:\r\n\t\t\tcollisionDetected = isCollision(pygame.Rect(drop[0],drop[1],fallSize[0],fallSize[1]), pygame.Rect(player[0],player[1],playerLength,playerWidth))\r\n\t\t\t\r\n\treturn nextFall, collisionDetected\r\n\r\ndef drawPlayer():\r\n\tglobal screen\r\n\tsurface = pygame.Surface((playerLength,1))\r\n\tsurface.fill(RED)\r\n\tscreen.blit(surface,player)\r\n\r\ndef movePlayer(side):\r\n\tglobal player\r\n\tif side == LEFT:\r\n\t\tplayer[0] -= 20\r\n\tif side == RIGHT:\r\n\t\tplayer[0] += 20\r\n\tif player[0]<10:\r\n\t\tplayer[0]=10\r\n\tif player[0]>590:\r\n\t\tplayer[0]=590\r\n\r\ndef NewDrop():\r\n\tglobal fall\r\n\tx,y = random.randint(1,599),random.randint(1,199)\r\n\tfor drops in fall:\r\n\t\tif x >= drops[0] and x <= drops[0]+(fallSize[0]*2):\r\n\t\t\treturn\r\n\tfall.append((x,y))\r\n\t\r\ndef drawfall():\r\n\tglobal fall,killCount\r\n\tfor drop in fall:\r\n\t\tsurface = pygame.Surface((1,1))\r\n\t\tsurface.fill(WHITE)\r\n\t\tscreen.blit(surface,drop)\r\n\t\tfall.remove(drop)\r\n\t\tif drop[1] < 400:\r\n\t\t\tfall.append((drop[0],drop[1]+1))\r\n\t\telse:\r\n\t\t\tkillCount += 1\r\n\t\t\tprint(killCount)\r\n\t\r\n\t\r\ndef isCollisionOld(player,fall):\r\n\tfor breadthPoint in range(0,playerLength):\r\n\t\tif (player[0]+breadthPoint,player[1]) in fall:\r\n\t\t\treturn True\r\n\treturn False\r\n\r\ndef main():\r\n\tdifficultyControl = 0\r\n\tglobal player,fall,killCount,screen,playerLength,fallDropFrequency\r\n\tisGameOver=False\r\n\tkillCount = 1\r\n\tfallDropFrequency = 400\r\n\tplayer = [random.randint(1,599),300]\r\n\tfall = []\r\n\tpygame.init()\r\n\tscreen = pygame.display.set_mode((600,400))\r\n\tscreen.fill(BLACK)\r\n\tclock = pygame.time.Clock()\r\n\t\r\n\tADDfallDROP = pygame.USEREVENT + 1\r\n\tpygame.time.set_timer(ADDfallDROP, fallDropFrequency)\r\n\t\r\n\tscreen.fill(WHITE)\r\n\twhile True:\r\n\t\tscreen.fill(WHITE)\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tquit()\r\n\t\t\tif event.type == pygame.KEYDOWN:\r\n\t\t\t\tif event.key == pygame.K_ESCAPE:\r\n\t\t\t\t\tquit()\r\n\t\t\t\tif event.key == pygame.K_r:\r\n\t\t\t\t\tkillCount = 0\r\n\t\t\t\t\tisGameOver = False\r\n\t\t\t\t\tfallDropFrequency = 200\r\n\t\t\t\t\tfall.clear()\t\r\n\t\t\t\t\tscreen.fill(WHITE)\r\n\t\t\t\t\t#playerLength = 10\r\n\t\t\tif not isGameOver and event.type == pygame.KEYDOWN:\r\n\t\t\t\tif event.key == pygame.K_LEFT:\r\n\t\t\t\t\tmovePlayer(LEFT)\r\n\t\t\t\tif event.key == pygame.K_RIGHT:\r\n\t\t\t\t\tmovePlayer(RIGHT)\r\n\t\t\tif event.type == ADDfallDROP:\r\n\t\t\t\tnewThread = threading.Thread(target=NewDrop, args=())\r\n\t\t\t\tnewThread.start()\r\n\t\t\t\t\t\r\n\t\tfall, collisionDetected = drawGame(player,fall,isGameOver)\r\n\t\t\r\n\t\t# ~ newThread = threading.Thread(target=drawGame, args=(player,fall,isGameOver))\r\n\t\t# ~ newThread.start()\r\n\t\t# ~ fall, collisionDetected = newThread.join()\r\n\t\t\r\n\t\t# ~ print(collisionDetected)\r\n\t\t# ~ print(killCount)\r\n\t\tif collisionDetected or isGameOver:\r\n\t\t\tisGameOver=GameOver(killCount)\r\n\t\t\r\n\t\tif killCount % 50 == 0 and difficultyControl!=killCount:\r\n\t\t\tdifficultyControl=killCount\r\n\t\t\tfallDropFrequency -= 20\r\n\t\t\t#playerLength += 5\r\n\t\t\r\n\t\tpygame.display.update()\r\n\t\tclock.tick(60)\r\n\r\nif __name__ == 
'__main__':\r\n\tmain()\r\n","sub_path":"AvoidBall.py","file_name":"AvoidBall.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"619211861","text":"# coding: gbk\n\"\"\"\n@author: sdy\n@email: sdy@epri.sgcc.com.cn\n\"\"\"\n\nfrom core.power import Power\nfrom core.topo import PowerGraph\n\n\ndef test_topo():\n path = '../dataset/wepri36'\n fmt = 'off'\n power = Power(fmt)\n power.load_power(path, fmt=fmt, lp=False, st=False, station=True)\n graph1 = PowerGraph(power, graph_type='single', node_type='station', on_only=True)\n islands1 = graph1.get_islands(min_num=5)\n print(islands1)\n graph2 = PowerGraph(power, graph_type='multi', node_type='bus',\n on_only=False, edge_columns=['x'])\n islands2 = graph2.get_islands(min_num=10)\n print(islands2)\n\n\nif __name__ == '__main__':\n test_topo()","sub_path":"tests/test_topo.py","file_name":"test_topo.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"262417891","text":"#!/usr/bin/env python\r\n# -- coding: utf-8 --\r\nimport sys\r\nfrom company_industry.config import *\r\nimport jieba\r\n# from gensim import corpora\r\nimport numpy as np\r\nimport os\r\nimport operator\r\nfrom jieba import posseg as pseg\r\nfrom math import log\r\n\r\nreload(sys)\r\nsys.setdefaultencoding(\"utf-8\")\r\n\r\nclass Dataprocess():\r\n def __init__(self):\r\n pass\r\n\r\n def get_fenci(self,file_name):\r\n with open(process_data_path+'company_industry_id\\\\'+file_name,'r') as fr,open(process_data_path+'company_industry_id_fenci\\\\'+file_name,'w') as fw:\r\n lines=fr.readlines()\r\n for line in lines:\r\n print(line.strip())\r\n print(line.strip().split('\\x01')[1])\r\n print(','.join(jieba.cut(line.strip().split('\\x01')[1],cut_all=False)))\r\n fw.write(','.join(jieba.cut(line.strip().split('\\x01')[1],cut_all=False))+'\\x01'+line.strip().split('\\x01')[2]+'\\n')\r\n\r\n def fenci_all_file(self):\r\n for file in os.listdir(process_data_path+'company_industry_id\\\\'):\r\n print(file)\r\n self.get_fenci(file)\r\n\r\n\r\n def get_vocab_freq_dict(self,file_name):\r\n with open(process_data_path+'company_industry_id_fenci\\\\'+file_name,'r') as fr,open(process_data_path+'company_kw_freq_dict\\\\'+file_name,'w') as fw:\r\n doc_list=[]\r\n lines=fr.readlines()\r\n for line in lines:\r\n print(line.strip().split('\\x01')[0])\r\n doc_list.append(line.strip().split('\\x01')[0].split(','))\r\n\r\n dictionary = corpora.Dictionary(doc_list)\r\n row_num=len(doc_list)\r\n col_num = len(dictionary)\r\n print(row_num,col_num)\r\n bow_corpus = [dictionary.doc2bow(text) for text in doc_list]\r\n\r\n doc_array = np.zeros((row_num, col_num), dtype=int)\r\n for i, j in enumerate(bow_corpus):\r\n for word_index, freq in j:\r\n doc_array[i][word_index] = freq\r\n\r\n vocab_dict = {}\r\n for k, v in dictionary.items():\r\n vocab_dict[v] = sum(doc_array[:, int(k)])\r\n for t, s in vocab_dict.items():\r\n if s == 1:\r\n vocab_dict.pop(t)\r\n print(len(vocab_dict))\r\n sort_dict=sorted(vocab_dict.iteritems(),key=operator.itemgetter(1),reverse=True)\r\n for key,value in sort_dict:\r\n percent=float(value)/float(row_num)\r\n print(key+':'+str(value))\r\n fw.write(key+'\\x01'+str(value)+'\\x01'+format(percent,'.2f')+'\\n')\r\n\r\n def get_vocab_dict_all(self):\r\n for file in os.listdir(process_data_path+'company_industry_id_fenci\\\\'):\r\n print(file)\r\n self.get_vocab_freq_dict(file)\r\n\r\nclass 
Dataprocess_outns():\r\n '''\r\n Runs the processing pipeline on company names after stripping place names from them\r\n '''\r\n def __init__(self):\r\n pass\r\n\r\n def fenci_out_ns(self,file_name):\r\n '''\r\n Segments the words and removes place names; writes the result into the text file named after the\r\n corresponding industry id under the company_fenci_outns folder\r\n :param file_name:\r\n :return:\r\n '''\r\n with open(process_data_path+'company_industry_id\\\\'+file_name,'r') as fr,open(process_data_path+'company_fenci_outns\\\\'+file_name,'w') as fw:\r\n lines=fr.readlines()\r\n for line in lines:\r\n print(line.strip())\r\n fenci = pseg.cut(line.strip().split('\\x01')[1])\r\n fenci_list = list(fenci)\r\n result_list = [k for (k, v) in fenci_list if v != 'ns']\r\n fw.write(','.join(result_list)+'\\n')\r\n\r\n def fenxi_outns_all(self):\r\n '''\r\n Processes every id file under the company_industry_id folder; the texts there already had \"other\",\r\n \"cross-domain\" and extra items such as 300200 removed, leaving 50 classes, i.e. 50 text files\r\n :return:\r\n '''\r\n for file in os.listdir(process_data_path+'company_industry_id\\\\'):\r\n print(file)\r\n self.fenci_out_ns(file)\r\n\r\n def get_vocab_dict_outns(self,file_name):\r\n '''\r\n Builds the vocabulary of each class id together with each word's in-class frequency count and ratio;\r\n writes the result into the corresponding industry-id text file under company_fenci_outns_dict\r\n :param file_name:\r\n :return:\r\n '''\r\n with open(process_data_path + 'company_fenci_outns\\\\' + file_name, 'r') as fr, open(\r\n process_data_path + 'company_fenci_outns_dict\\\\' + file_name, 'w') as fw:\r\n lines = fr.readlines()\r\n row_num = len(lines)\r\n vocab_dict = {}\r\n line_list = [line.strip().split(',') for line in lines]\r\n for line in line_list:\r\n for kw in line:\r\n if kw in vocab_dict:\r\n vocab_dict[kw] += 1\r\n else:\r\n vocab_dict[kw] = 1\r\n\r\n for t, s in vocab_dict.items():\r\n if s == 1:\r\n vocab_dict.pop(t)\r\n print(len(vocab_dict))\r\n sort_dict = sorted(vocab_dict.iteritems(), key=operator.itemgetter(1), reverse=True)\r\n for key, value in sort_dict:\r\n percent = float(value) / float(row_num)\r\n print(key + ':' + str(value))\r\n fw.write(key + '\\x01' + str(value) + '\\x01' + format(percent, '.5f') + '\\n')\r\n\r\n def get_vocab_dict_all(self):\r\n '''\r\n Processes every file under company_fenci_outns and produces the matching vocabulary frequency files\r\n :return:\r\n '''\r\n for file in os.listdir(process_data_path+'company_fenci_outns\\\\'):\r\n print(file)\r\n self.get_vocab_dict_outns(file)\r\n\r\nclass Industryhuizong():\r\n '''\r\n Merges the per-industry-id vocabularies into one overall vocabulary and runs the follow-up statistics\r\n '''\r\n def __init__(self):\r\n pass\r\n\r\n def get_all_industry_vocab(self):\r\n '''\r\n Builds the overall vocabulary from the per-industry vocabularies and counts, for each word, in how many\r\n of the 50 classes it appears. After later tuning, a few words are pinned to a class count of 51 so that\r\n their weight is reduced as much as possible in the later computation. The weighting works like idf: it\r\n is the logarithm of (51 / the count above). Each line written to the industry_vocab_dict file is\r\n word + '\\x01' + count + '\\x01' + log value\r\n :return:\r\n '''\r\n with open(process_data_path + 'industry_vocab_dict','w') as fw:\r\n industry_vocab_dict = {}\r\n for file in os.listdir(process_data_path + 'company_fenci_outns_dict\\\\'):\r\n print(file)\r\n with open(process_data_path + 'company_fenci_outns_dict\\\\'+file,'r') as fr:\r\n kw_list=[line.strip().split('\\x01')[0] for line in fr.readlines()]\r\n for kw in kw_list:\r\n if kw in industry_vocab_dict:\r\n industry_vocab_dict[kw]+=1\r\n else:\r\n industry_vocab_dict[kw]=1\r\n industry_vocab_dict['有限公司']=51 # generic company-name terms (\"Co., Ltd.\" etc. below) are pinned to 51\r\n industry_vocab_dict['有限责任'] = 51\r\n industry_vocab_dict['分公司'] = 51\r\n industry_vocab_dict['中心'] = 51\r\n industry_vocab_dict['科技'] = 51\r\n industry_vocab_dict['公司'] = 51\r\n industry_vocab_dict['国际'] = 51\r\n industry_vocab_dict['创新'] = 51\r\n industry_vocab_dict['管理'] = 51\r\n industry_vocab_dict['股份'] = 51\r\n sort_industry_vocab_dict=sorted(industry_vocab_dict.iteritems(),key=operator.itemgetter(1),reverse=True)\r\n for key,value in sort_industry_vocab_dict:\r\n print(key+':'+str(len(key)))\r\n log_value=log(float(51)/float(value))\r\n if value>1 and len(key)>3:\r\n fw.write(key+'\\x01'+str(value)+'\\x01'+format(log_value,'.5f')+'\\n')\r\n
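 # A worked example of the idf-like weight computed above and consumed by get_kw_weight below,
 # assuming a word that appears in 3 classes (the formula divides the 51 cap by that count):
 #
 #     from math import log
 #     log(51 / 3)    # ~= 2.833 -> words that are rare across classes get a large weight
 #     log(51 / 51)   # = 0.0   -> the pinned generic terms get weight 0 and are filtered out later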
\r\n def get_kw_weight(self,file_name):\r\n '''\r\n Multiplies the overall weight from industry_vocab_dict by each word's per-industry frequency to get the\r\n word's weight within its own industry, then ranks the words by that weight in descending order. The\r\n result is written to the matching document under the company_kw_weight_dict folder as\r\n word + '\\x01' + weight, keeping only words whose weight is greater than 0\r\n :param file_name:\r\n :return:\r\n '''\r\n with open(process_data_path + 'industry_vocab_dict','r') as fr:\r\n kw_log_dict={line.strip().split('\\x01')[0]:float(line.strip().split('\\x01')[2]) for line in fr.readlines()}\r\n with open(process_data_path + 'company_fenci_outns_dict\\\\'+file_name,'r') as fr1,open (\r\n process_data_path + 'company_kw_weight_dict\\\\'+file_name,'w') as fw:\r\n kw_percent_dict={line.strip().split('\\x01')[0]:float(line.strip().split('\\x01')[2]) for line in fr1.readlines()}\r\n kw_weight_dict={}\r\n for k in kw_percent_dict:\r\n if k in kw_log_dict:\r\n kw_weight=kw_percent_dict[k]*kw_log_dict[k]\r\n print(kw_weight)\r\n kw_weight_dict[k]=kw_weight\r\n sort_kw_weight_dict=sorted(kw_weight_dict.iteritems(),key=operator.itemgetter(1),reverse=True)\r\n for kw,weight in sort_kw_weight_dict:\r\n if weight>0:\r\n fw.write(kw+'\\x01'+format(weight,'.10f')+'\\n')\r\n\r\n def get_kw_weight_all(self):\r\n for file in os.listdir(process_data_path + 'company_fenci_outns_dict\\\\'):\r\n self.get_kw_weight(file)\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n # Dataprocess() is commented out and no longer used\r\n # model=Dataprocess()\r\n # model.fenci_all_file()\r\n # model.get_vocab_dict_all()\r\n\r\n # model=Dataprocess_outns()\r\n # model.fenxi_outns_all()\r\n # model.get_vocab_dict_all()\r\n\r\n model=Industryhuizong()\r\n model.get_all_industry_vocab()\r\n model.get_kw_weight_all()\r\n","sub_path":"company_info_0/company_industry/data_process/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":10003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"633443227","text":"# -*- coding: utf-8 -*-\n'''\ninterface.py\n\nThe purpose of this module is two-fold:\n\n1. Acting as a \"dispatcher\" to select the appropriate function/module\ndepending on which Python function is used.\n\n2. 
Creating an interface (or bridge) between modules (such as plugins)\nand the Crunchy core so that these modules can be tested as independently\nas possible from those in Crunchy's core.\n\n'''\nimport imp\nimport os\nimport sys\npython_version = sys.version_info[0] + sys.version_info[1]/10.0\n\n# StringIO is used for creating in-memory files\n# We also take a page from Django's book and create an identifiable\n# string/bytes type.\nif python_version < 3: # kept for reference\n from StringIO import StringIO\n crunchy_bytes = str\n crunchy_unicode = unicode\nelse:\n from io import StringIO\n crunchy_bytes = bytes\n crunchy_unicode = str\n\n\n# Some special functions, specific to a given\n# Python version are defined below\nimport src.tools as tools\nu_print = tools.u_print\nu_join = tools.u_join\nexec_code = tools.exec_code\n\n# Rather than having various modules importing the configuration.py,\n# CrunchyPlugin.py, etc.,\n# we will set things up so that the relevant will populate the\n# following dictionary when it is loaded; however, it will be possible\n# to artificially populate it as well from other sources enabling\n# independent unit testing.\n\naccounts = {} # initialized in crunchy.py\nadditional_vlam = {} # initialized from plugins by CrunchyPlugin.py\nadditional_menu_items = {}\nadditional_properties = {} # initialized by various plugins\nnames = {}\nconfig = {} # initialized mostly by configuration.py\nplugin = {} # initialized by CrunchyPlugin.py\npreprocessor = {} # initialized via CrunchyPlugin.py\nserver = {} # initialized by pluginloader.py\ntranslate = {} # initialized below\nexams = {} #used by pluging exam_mode.py and vlam_doctest.py\nfrom_comet = {} # initialized from cometIO.py\nunknown_user_name = None\nlast_local_base_url = None\npath_info = {} # see rst_directives plugin\n\ndef get_base_dir():\n path = os.path.normpath(os.path.join(os.path.dirname(__file__),\n '..'))\n # Python 3: normpath() decodes by default into a string.\n\n if isinstance(path, str):\n return path\n return path.decode(sys.getfilesystemencoding())\nconfig['crunchy_base_dir'] = get_base_dir()\nplugin['crunchy_base_dir'] = get_base_dir\n\nimport src.translation as translation\ntranslate['_'] = translation._\ntranslate['init_translation'] = translation.init_translation\n\nfrom src.debug import debug\ndef debug_msg(data):\n \"\"\"write a debug message, debug messages always appear on stderr\"\"\"\n if data is None:\n data = 'None'\n sys.__stderr__.write(data)\n sys.__stderr__.write(\"\\n\")\n\n# We use ElementTree, if possible as ElementSoup in combination with\n# BeautifulSoup, in order to parse and process files.\n# ElementTree is part of Python as of version 2.5;\n# Nonetheless, we use a slightly customized version ... 
and an even\n# more customized one for Python 3.\n\nif python_version < 3:\n from src.element_tree import ElementTree\nelse:\n from src.element_tree3 import ElementTree\n\nElement = ElementTree.Element\nSubElement = ElementTree.SubElement\nfromstring = ElementTree.fromstring\ntostring = ElementTree.tostring\n\ninteractive = False # used with python crunchy -i option\n\nif python_version < 3:\n import pygments.token\n generic_output = pygments.token.STANDARD_TYPES[pygments.token.Generic.Output]\n generic_traceback = pygments.token.STANDARD_TYPES[pygments.token.Generic.Traceback]\n generic_prompt = pygments.token.STANDARD_TYPES[pygments.token.Generic.Prompt]\n comment = pygments.token.STANDARD_TYPES[pygments.token.Comment]\nelse:\n import pygments3.token\n generic_output = pygments3.token.STANDARD_TYPES[pygments3.token.Generic.Output]\n generic_traceback = pygments3.token.STANDARD_TYPES[pygments3.token.Generic.Traceback]\n generic_prompt = pygments3.token.STANDARD_TYPES[pygments3.token.Generic.Prompt]\n comment = pygments3.token.STANDARD_TYPES[pygments3.token.Comment]\n","sub_path":"crunchy/src/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"321336432","text":"import random\nimport torch\n\n\nclass TripletWrapper(torch.nn.Module):\n\n def __init__(self, query_encoder, candidate_encoder, sampling_type='random', margin=0.25, change_text_type=False):\n\n super().__init__()\n\n self.query_encoder = query_encoder\n self.candidate_encoder = candidate_encoder\n self.sampling_type = sampling_type\n self.margin = margin\n self.change_text_type = change_text_type\n\n def query2response(self, query): # takes the query tensor explicitly (it was referenced here without being passed in)\n response = query.clone() # copy so the caller's tensor is not mutated in place\n response[:, 0] = query[:, 0] + 1\n return response\n\n @staticmethod\n def random_negatives(query, positive_candidates):\n\n possible_negatives = torch.cat([query, positive_candidates])\n\n batch_size = possible_negatives.size(0)\n\n indices = torch.randperm(batch_size)\n indices = indices[~(indices == torch.arange(batch_size))]\n indices = indices[:batch_size // 2]\n\n negative_candidates = possible_negatives[indices]\n\n return negative_candidates\n\n def semi_hard_negatives(self,\n query,\n positive_candidates,\n query_embed,\n positive_candidates_embed,\n positive_sim_matrix):\n\n possible_negatives = torch.cat([query, positive_candidates])\n\n with torch.no_grad():\n query_like_candidates = self.candidate_encoder(query.detach())\n\n possible_negatives_embed = torch.cat([query_like_candidates, positive_candidates_embed.detach()])\n negative_sim_matrix = torch.matmul(query_embed.detach(), possible_negatives_embed.t())\n\n sim_matrix_mask = torch.cat([1 - torch.eye(query.size(0)),\n 1 - torch.eye(query.size(0))],\n dim=1).to(query.device)\n\n negative_sim_matrix = negative_sim_matrix * sim_matrix_mask\n\n difference = positive_sim_matrix.detach().unsqueeze(-1).repeat(1, negative_sim_matrix.size(-1))\n difference = difference - negative_sim_matrix\n\n negative_sim_matrix[negative_sim_matrix == 0.] 
= -1.\n negative_sim_matrix[difference < 0] = -1.\n negative_sim_matrix[difference > self.margin] = -1.\n\n _, indices = negative_sim_matrix.max(dim=1)\n\n z = possible_negatives[indices]\n\n return z\n\n def forward(self, x, y, z=None):\n\n x_embed = self.query_encoder(x)\n y_embed = self.candidate_encoder(y)\n\n positive_sim_matrix = (x_embed * y_embed).sum(dim=1)\n \n query_as_response = x\n \n if self.change_text_type:\n query_as_response = self.query2response(query_as_response)\n\n if z is None:\n if self.sampling_type == 'random':\n z = self.random_negatives(query_as_response, y)\n elif self.sampling_type == 'hard':\n pass\n elif self.sampling_type in ['semi_hard', 'semi-hard']:\n z = self.semi_hard_negatives(query_as_response, y, x_embed, y_embed, positive_sim_matrix)\n else:\n ValueError('Unexpected sampling_type')\n\n z_embed = self.candidate_encoder(z)\n\n negative_sim_matrix = (x_embed * z_embed).sum(dim=1)\n\n loss = torch.relu(self.margin - positive_sim_matrix + negative_sim_matrix).mean()\n\n return loss\n\n\nclass ClassificationWrapper(TripletWrapper):\n\n def __init__(self, query_encoder, candidate_encoder, sampling_type='random'):\n\n super().__init__(query_encoder=query_encoder,\n candidate_encoder=candidate_encoder,\n sampling_type=sampling_type)\n\n self.output_projection = torch.nn.Linear(in_features=1, out_features=2)\n self.criterion = torch.nn.CrossEntropyLoss()\n\n def forward(self, x, y, z=None):\n\n x_expand = x.repeat(2, 1)\n\n x_embed = self.query_encoder(x_expand)\n y_embed = self.candidate_encoder(y)\n\n positive_sim_matrix = (x_embed[:x.size(0), :] * y_embed).sum(dim=1)\n\n if z is None:\n if self.sampling_type == 'random' or not self.training:\n z = self.random_negatives(x, y)\n elif self.sampling_type == 'hard':\n ValueError('Unexpected sampling_type')\n elif self.sampling_type in ['semi_hard', 'semi-hard']:\n z = self.semi_hard_negatives(x, y,\n x_embed[:x.size(0), :],\n y_embed,\n positive_sim_matrix)\n else:\n ValueError('Unexpected sampling_type')\n\n z_embed = self.candidate_encoder(z)\n\n negative_sim_matrix = (x_embed[x.size(0):, :] * z_embed).sum(dim=1)\n\n similarity = torch.cat([positive_sim_matrix, negative_sim_matrix])\n\n prediction = self.output_projection(similarity.unsqueeze(-1))\n\n targets = torch.cat([torch.ones((x.size(0),)), torch.zeros((x.size(0),))]).long().to(x.device)\n\n loss = self.criterion(prediction, targets)\n\n return loss\n\n\nclass MultipleNegativesClassificationWrapper(ClassificationWrapper):\n\n def __init__(self, query_encoder, candidate_encoder, n_negatives=5, sampling_type='random'):\n\n super().__init__(query_encoder=query_encoder,\n candidate_encoder=candidate_encoder,\n sampling_type=sampling_type)\n\n self.n_negatives = n_negatives\n self._indices = list(range(2 ** 15))\n self.criterion = torch.nn.CrossEntropyLoss(reduction='none')\n\n def multiple_random_negatives(self, query, positive_candidates):\n possible_negatives = torch.cat([query, positive_candidates])\n\n batch_size = query.size(0)\n\n current_indices = self._indices[:batch_size]\n\n negative_indices = [[ind for ind in random.sample(current_indices, self.n_negatives * 2)\n if ind not in [i, i + batch_size]][:self.n_negatives]\n for i in current_indices]\n\n negative_indices = torch.tensor(negative_indices).long()\n\n negative_candidates = possible_negatives[negative_indices].view(-1, query.size(-1))\n\n return negative_candidates\n\n def forward(self, x, y, z=None):\n\n batch_size = x.size(0)\n\n x_embed = self.query_encoder(x)\n y_embed = 
self.candidate_encoder(y)\n\n positive_sim_matrix = (x_embed * y_embed).sum(dim=1)\n\n if z is None:\n if self.sampling_type == 'random' or not self.training:\n z = self.multiple_random_negatives(x, y)\n elif self.sampling_type == 'hard':\n ValueError('Unexpected sampling_type')\n elif self.sampling_type in ['semi_hard', 'semi-hard']:\n ValueError('Unexpected sampling_type')\n else:\n ValueError('Unexpected sampling_type')\n\n z_embed = self.candidate_encoder(z)\n\n negative_sim_matrix = (x_embed.repeat(self.n_negatives, 1) * z_embed).sum(dim=1)\n\n similarity = torch.cat([positive_sim_matrix, negative_sim_matrix])\n\n prediction = self.output_projection(similarity.unsqueeze(-1))\n\n targets = torch.cat([torch.ones((batch_size,)), torch.zeros((batch_size * self.n_negatives,))])\n targets = targets.long().to(x.device)\n\n loss = self.criterion(prediction, targets)\n\n positive_loss = loss[:batch_size]\n negative_loss = loss[batch_size:]\n\n negative_loss = negative_loss.unsqueeze(-1).view(batch_size, -1).mean(dim=1)\n\n loss = positive_loss + negative_loss\n loss = loss.mean()\n\n return loss\n","sub_path":"baseline_training_classes/wrapper_old.py","file_name":"wrapper_old.py","file_ext":"py","file_size_in_byte":7717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"86022895","text":"import json\n\nfrom library.line_bot.helper import message_handler\nfrom library.line_bot.models import Message\n\n\ndef webhook(event, context):\n \"\"\"\n webhook handler for LINE bot.\n \"\"\"\n\n msg = json.loads(event['body'])\n\n # LINE server will send mutli-msg at once, so catch messages with loops.\n for event in msg['events']:\n message = Message(event)\n print(message.to_dict())\n message_handler(message)\n\n # quick response for webhook.\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps({\"message\": 'ok'})\n }\n\n return response\n","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"195994709","text":"# 1878. Get Biggest Three Rhombus Sums in a Grid\n# vbc 53\n\n# 2021/05/31\n# Runtime: 940 ms, faster than 92.40% of Python3 online submissions for Get Biggest Three Rhombus Sums in a Grid.\n# Memory Usage: 17.6 MB, less than 58.89% of Python3 online submissions for Get Biggest Three Rhombus Sums in a Grid.\n\n# 刚开始想法就是暴力,想看看有没有简单的答案,结果发现提示就是暴力。然后写出来了。\n# 对于每个 (i, j), 菱形的另外3个顶点为 (i-1, j-1), (i-1,j+1), (i-2,j)\n# 然后暴力即可。\n\nclass Solution:\n def getBiggestThree(self, grid: List[List[int]]) -> List[int]:\n m, n = len(grid), len(grid[0])\n sums = set()\n for i in range(m):\n for j in range(n):\n sums.add(grid[i][j])\n l, r = j-1, j+1\n x, t = i-1, i-2\n V = grid[i][j]\n while x >= 0 and l >= 0 and r < n and t >= 0:\n V += grid[x][l] + grid[x][r]\n curr = V + grid[t][j]\n l_, r_ = l + 1, r - 1\n x_ = x - 1\n while l_ < j:\n curr += grid[x_][l_] + grid[x_][r_]\n l_ += 1\n x_ -= 1\n r_ -= 1\n sums.add( curr )\n x -= 1; l -= 1; r += 1; t -= 2\n return sorted(sums, reverse = True)[:3]\n\n\n\n","sub_path":"1878. Get Biggest Three Rhombus Sums in a Grid.py","file_name":"1878. 
Get Biggest Three Rhombus Sums in a Grid.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"310746930","text":"#%%\nfrom pathlib import Path\nfrom pandas import read_csv\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.svm import LinearSVC\n\n### DATA INGESTION\n\ntrain_data = read_csv(\n Path('./data/train.csv'),\n names=['sentiment', 'id', 'date', 'query_train', 'handle', 'tweet']\n)\n\nX_train = train_data['tweet']\n# 0->negative, 1->positive\ny_train = train_data['sentiment'].replace(to_replace=4, value=1)\n\n### TRAINING AND TESTING\n\npipe = make_pipeline(\n # pre-processing\n CountVectorizer(\n ngram_range=(1, 2),\n ),\n TfidfTransformer(),\n # training\n LinearSVC(C=0.29),\n)\n\npipe.fit(X_train, y_train)\n\ntest_data = read_csv(\n Path('./data/test.csv'),\n names=['sentiment', 'id', 'date', 'query_train', 'handle', 'tweet']\n)\n\n# The test dataset contains neutral tweets. Since the model was trained using only pos/neg sentiments, it would not be fair to include the neutral ones in the test.\ntest_data = test_data[test_data['sentiment'] != 2]\n\nX_test = test_data['tweet']\ny_test = test_data['sentiment'].replace(to_replace=4, value=1)\n\npipe.score(X_test, y_test)\n# Final score of 0.841225626740947\n","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"391318551","text":"import sys\nsys.stdin = open ('4871_그래프경로.txt', 'r')\n\nT = int(input())\n\nfor tc in range(T):\n p, num = list(map(int, input().split()))\n lines = [list(map(int, input().split())) for _ in range(num)]\n find_s, find_f = list(map(int, input().split()))\n\n check = [[0] * p for _ in range(p)]\n\n for i in range(len(lines)):\n x = lines[i][0]\n y = lines[i][-1]\n check[x-1][y-1] = 1\n\n\n\n result = 0\n stack = []\n for i in range(len(check[find_s - 1])):\n if check[find_s - 1][i] == 1:\n n = find_s\n\n stack.append(find_s -1)\n n = find_s\n\n\n while n != find_f-1:\n n = stack[-1]\n for i in range(p):\n\n if check[n][i] == 1:\n stack.append(i)\n if i == find_f-1:\n result = 1\n n = i\n break\n\n if sum(check[n]) == 0:\n stack.pop()\n\n if len(stack) == 0:\n result = 0\n break\n\n\n print('#{} {}' .format(tc+1, result))\n","sub_path":"Algorithm/190822/2기서울3반_홍수���_4871_그래프경로.py","file_name":"2기서울3반_홍수경_4871_그래프경로.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"69615520","text":"import random as rnd\nimport sys\n\nif __name__ == \"__main__\":\n data = []\n for i in range(0, int(sys.argv[1])):\n data.append([])\n for j in range(0, int(sys.argv[2])):\n data[i].append([\n round(rnd.uniform(0, 5.00), 2), \n rnd.randrange(0, 5), \n #1,\n round(rnd.uniform(0, 500.00), 2)\n ])\n\n print(len(data), len(data[0]))\n\n for i in range(0, len(data)):\n for j in range(0, len(data[0])):\n print(' '.join([str(elem) for elem in data[i][j]]))","sub_path":"random-coordinates-generator.py","file_name":"random-coordinates-generator.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"241409141","text":"def ejemplo():\n\tprint(\"Hola como vass\")\n\tprint(\"Bien and 
you\")\n\treturn \"Termino\"\n\tprint(\"Fine\")\n\n#El return sirve para terminar una funcion \nejemplo()\n\n\ndef numero(numero):\n\tif numero:\n\t\treturn(\"Ingreso un numero {}\"). format(numero)\n\nrespuesta = numero(23)\nif respuesta:\n\tprint(respuesta)\n","sub_path":"Funciones/Terminar.py","file_name":"Terminar.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"463261215","text":"# Original code by Mehmet Ozturk \n# Modified by Sebastiaan van Essen 07/2016\n# This code combines the time precision of the GNSS network and data from ALM sensors\n# and sends the data via Ethernet to a computer as a stand alone module.\n#\n\n#from _overlapped import NULL\n#from test.support import temp_cwd\n\n\n#///////////////////////////////// Importing modules for functions later used ///////////////////////\n\nimport os # importing the possibility to operate system commands\nimport sys # Importing the possibility to use some system variables\nimport serial # Importing the possibility to use serial communication\nimport threading # Importing the possibility to run multiple operations at the same time\nimport datetime # Importing some (system) clock operations\nimport time # Importing some (system) clock operations\nimport logging # importing the possibility to track events and log them\nimport socket # Importing Networking interface\nfrom macpath import join\nsys.path.append(r'/home/pi/pysrc')\nimport RPi.GPIO as GPIO # readying the code for GPIO usage\nimport pydevd # Import remote debugger\n\n\n\n#///////////////////////////////// Defining variables used for the data splitting ///////////////////\n\n\n# These variables are for the parsing of the ZDA data\nsDag = ''\nsMaand = ''\nsJaar = ''\nsUur = ''\nsMinuut = ''\nsSecond = ''\nsMSecond = ''\ndatum = ''\ntijd = ''\n\n\n# These variables are for the parsing of the AML data\nsAml0 = ''\nsAml1 = ''\nsAml2 = ''\nsAml3 = ''\nsAml4 = ''\nsAml5 = ''\nsAml6 = ''\nstatus = '00'\n\n# These variables are used to pull the time from the systemclock and use them for tagging\nsDagNu = ''\nsMaandNu = ''\nsJaarNu = ''\nsUurNu = ''\nsMinuutNu = ''\nsSecondNu = ''\nsMSecondNu = ''\ndatumNu = ''\ntijdNu = ''\nsetTime = ''\ndatumTijd = ''\n\n#///////////////////////////////// Defining triggers for functions /////////////////////////////////\n\nbTrigger = False # This trigger is used for the PPS input\nbZdaOntvangen = False # This trigger is to keep track of the \"freshness\" of the ZDA time info\nbAmlOntvange = False # This trigger is to see if there is unsent AML info.\n\n\n#///////////////////////////////// Error/debug logging functionality ///////////////////////////////\n\nlogging.basicConfig(level=logging.DEBUG,\n format='[%(levelname)s] (%(threadName)-10s) %(message)s',\n )\n\n\n#///////////////////////////////// GPIO configuration ////////////////////////////////////////////////\n\n#Configuring the general pins for input/output (GPIO\nGPIO.setmode(GPIO.BCM) # setup GPIO using Board numbering\nGPIO.setup(7, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # setting Pin 7 as input, also a pull-down resistor is turned on internally\n\n\n#/////////////////// Serial communication configurations ///////////////////////////////////////////\n\n#Open Com port of GPZDA (connected via pin 8 and 10 of GPIO)\nserZda = serial.Serial('/dev/ttyUSB0') # Linking serZDA to the correct Com port\n#serZda = serial.Serial('/dev/ttyAMA0') # Linking serZDA to the correct Com port\n#serZda.baudrate = 
38400  # Setting the communication speed of the serial port\nserZda.baudrate = 9600  # Setting the communication speed of the serial port\nserZda.isOpen()  # Check that the serial port is open\n\n\n#Open Com port of AML (connected via USB)\nserAml = serial.Serial('/dev/ttyAMA0')  # Linking serAml to the correct Com port\n#serAml = serial.Serial('/dev/ttyUSB0')  # Linking serAml to the correct Com port\nserAml.baudrate = 9600  # Setting the communication speed of the serial port\n#serAml.baudrate = 38400  # Setting the communication speed of the serial port\nserAml.isOpen()  # Check that the serial port is open\n\n\n\n\n#///////////////////////////////// Processing the incoming data by splitting it and putting it in usable variables //////////\n\n\n\n    #Clearing data from AML\n\ndef clearAml():\n    global sAml0; sAml0 = ''     #resetting all AML fields to empty strings\n    global sAml1; sAml1 = ''     \n    global sAml2; sAml2 = ''\n    global sAml3; sAml3 = ''\n    global sAml4; sAml4 = ''\n    global sAml5; sAml5 = ''\n    global sAml6; sAml6 = ''\n    global datumNu; datumNu = ''\n    global dataToSend; dataToSend = '\\r\\n'+'$SBDAML,,,,,,,,00'\n    print ('AML cleared\\r\\n')\n    writeCom2('AML cleared\\r\\n')\n\n    #Pulling the time from the system and writing it into a usable variable\n\ndef getTime():\n\n#    currentDateTime = datetime.datetime.now().strftime('%H:%M:%S+1.%f,%d,%m,%Y')   \n    currentDateTimeRaw = datetime.datetime.now() + datetime.timedelta(seconds =1)\n    currentDateTime = currentDateTimeRaw.strftime('%H:%M:%S.%f,%d,%m,%Y')\n    #currentDateTime = datetime.datetime.now().strftime('%H:%M:%S.%f,%d,%m,%Y')   \n    currentTime = currentDateTime.split(',')  # with split() each comma separated piece of currentDateTime is written in array currentTime. \n    \n    global tijdNu; tijdNu = currentTime[0]  # Splitting the array into time \n    global sDagNu; sDagNu = currentTime[1]  # Day\n    global sMaandNu; sMaandNu = currentTime[2]  # Month   \n    global sJaarNu; sJaarNu = currentTime[3]  # And year   \n    global datumNu; datumNu = sDagNu + '-' + sMaandNu + '-' + sJaarNu + ',' + tijdNu  # The combined data of day+month+year makes the variable datumNu (date)  \n\n\n\n\n    #Splitting the ZDA data into 8 variables, then processing it into time and date\n\ndef parseZda(raw_message):\n    if raw_message is None:  # if no data is sent stop the madness\n        return None\n    try:\n        sLines = raw_message.split(',')  # with split() each comma separated piece of raw_message is written in array sLines. \n        if len(sLines) < 7:  # if the data contains fewer than 7 blocks\n            return None\n        if len(sLines[1]) < 9:  # or the time field is shorter than 9 characters\n            return None  # do nothing\n        \n        tempTijd = sLines[1]  # tempTijd is the 2nd string of data from array sLines  \n        global sUur; sUur = tempTijd[:2]  # the first two digits are the hours\n        global sMinuut; sMinuut = tempTijd[2:4]  # digits 3 and 4 are minutes  \n        global sSecond; sSecond = tempTijd[4:6]  # digits 5 and 6 are seconds  \n        global sMSecond; sMSecond = tempTijd[7:]  # everything after the decimal point is milliseconds  \n        global tijd; tijd = sUur + ':' + sMinuut + ':' + sSecond + '.' 
+ sMSecond  #Time in format HH:MM:SS.mmm  \n        \n        if len(sLines[2]) < 2 or len(sLines[3]) < 2 or len(sLines[4]) < 2:  # if the day, month or year field is shorter than 2 digits, discard the data\n            return None\n        global sDag; sDag = sLines[2]  # the 3rd string of sLines is the day\n        global sMaand; sMaand = sLines[3]  # the 4th string of sLines is the month  \n        global sJaar; sJaar = sLines[4]  # the 5th string of sLines is the year  \n        global datum; datum = sJaar + '-' + sMaand + '-' + sDag  # The combined data of day+month+year makes the variable datum (date)  \n#        return ' ZDA OK' + ' >> ' +datum + ' ' + tijd  # Send confirmation + data (ZDA OK >> parsed data ) to console and Com1\n        global datumTijd; datumTijd = \"'\" + datum + ' ' + tijd +\"'\"  # Wrap date and time in quotes so they can be passed to the 'date -s' shell command\n#        print (datumTijd)\n        return ' ZDA OK' + ' >> ' + datumTijd  # Send confirmation + data (ZDA OK >> parsed data ) to console and Com1  \n    except Exception as e:  # if something goes wrong print the error to console\n        print ('Exception: ' + str(e))\n        #pass\n\n\n    #Splitting the AML Data into 7 variables\n\ndef parseAml (raw_mess):\n    sLineAml = raw_mess.split(' ')  # with split() each space separated piece of raw_mess is written in array sLinesAml. \n    if len(sLineAml) < 7:  # if the data is shorter than 7 blocks of data run next line\n        return None  # return stops the function if the \"if statement\" is met (see above)\n    global dataToSend\n    getTime()\n    global status\n    global sAml0; sAml0 = sLineAml[0]  # put the first block of data in the global variable sAml0\n    global sAml1; sAml1 = sLineAml[1]  # second block into sAml1 and so on.\n    global sAml2; sAml2 = sLineAml[2]\n    global sAml3; sAml3 = sLineAml[3]\n    global sAml4; sAml4 = sLineAml[4]\n    global sAml5; sAml5 = sLineAml[5]\n    global sAml6; sAml6 = sLineAml[6]\n    dataToSend = '\\r\\n'+'$SBDAML' + ',' + datumNu + ',' + sAml2 + ',' + sAml3 + ',' + sAml4 + ',' + sAml5 + ',' + sAml6 + ',' + status\n\n#    return ' ALM OK'+' >> '+ sAml2+' '+ sAml3+' '+ sAml4+' '+sAml5 +' '+sAml6  # Send confirmation + data (AML OK >> parsed data ) to console and Com2\n    return ' ALM OK'+' >> '+ dataToSend  # Send confirmation + data (AML OK >> parsed data ) to console and Com2\n\n\n#///////////////////////////////// Serial receive loops /////////////////////////////////////////////\n\n\n    #Serial ZDA (Com1)\n    \ndef serZdaReader():\n#    writeCom1('Hallo ZDA\\r\\n');  # just a happy hello while starting up (to see if startup goes the way expected)\n    \n    while True:  # Run forever\n        bLine = serZda.readline()  # Read the incoming data from serial ZDA and put it in bLine\n        sLine = bLine.decode(encoding='utf_8')  # decode it into usable data \n#        writeCom1(sLine)  # Write the raw data to Com1\n        pass\n        print ('   COM1 ZDA: ' +sLine)  # Write the raw data to terminal\n        datumtijd = parseZda(sLine)  # parse the raw data string into usable variables\n        if datumtijd == None:  # if there is no usable data print \"datumtijd is none\"\n            print('Datumtijd is none:')\n        \n        else:  # If the data is usable  \n            bZdaOntvangen = True  # The trigger that the data is fresh is put to true\n            print (datumtijd+ '\\r\\n'+ '\\r\\n')  # Print the usable date and time to terminal\n#            writeCom1(datumtijd + '\\r\\n')  # Write the usable date and time to Com1  \n\n\n\n\n    #Serial AML (Com2)\ndef serAmlReader():\n    \n    writeCom2('Hallo AML\\r\\n')\n    while True:  # loop forever\n        b1Line = serAml.readline()  # read the line from serial ALM and write it to b1Line\n        s1Line = b1Line.decode(encoding='utf_8')  # Decode the data from serial ALM to usable data\n#        getTime()\n        
writeCom2(s1Line)  # Write the raw data to Com2\n        pass\n        print ('   COM2 AML: '+s1Line)  # Print the raw data to console \n        print ( datetime.datetime.now())  # Print to console AML was received\n        isAmlValid = parseAml(s1Line)  # turn the raw data into usable data blocks\n        if isAmlValid == None:  # if the data is garbage print \"AML not valid\" to console\n            print('AML not valid')\n        \n        else:\n            bAmlOntvange = True  # If the data is not garbage do the following\n            print (isAmlValid+ '\\r\\n'+ '\\r\\n')  # Print status (OK) to console  \n            writeCom2(isAmlValid + '\\r\\n')  # Print status (OK) to Com2\n\n\n#////////////////////////////////////// Serial Write loops /////////////////////////////////////////////\n\n\ndef writeCom1(textToWrite):  # Serial port 1 ZDA Writer\n    serZda.write(textToWrite.encode(encoding='utf_8', errors='strict'))  # Encode data to serial protocol for Com1\n\n\ndef writeCom2(textToWrite):  # Serial port 2 AML Writer\n    serAml.write(textToWrite.encode(encoding='utf_8', errors='strict'))  # Encode data to serial protocol for Com2\n\n\n\n#///////////////////////////////// This is what happens when pin 7 (PPS) goes high ///////////////////\n\n    #When pulse() is used this is what happens\ndef pulse(channel):\n    global bTrigger; bTrigger = True  # First the bTrigger is set to True to show a fresh pulse has been received  \n#    writeCom1('Trigger gekregen\\n')  # A confirmation is sent to Com1\n    writeCom2('Trigger gekregen\\n')  # A confirmation is sent to Com2  \n    print('trigger' )  # Tell the terminal that PPS was received\n    print (datetime.datetime.now())  # Print to console PPS was received\n#    print ('3'+ datumTijd)\n    os.system('date -s %s' % datumTijd)\n#    os.system('date -s \"1 second\"')\n\n    \n    #This is the detector that sees the pin go high and then starts the function pulse\nGPIO.add_event_detect(7, GPIO.RISING, callback=pulse, bouncetime = 300)  # add rising edge detection on the PPS pin\n\n\n\n\n\n#////////////////////////////////////// Ethernet write loops //////////////////////////////////////////\n\ndef socketWriter (conn):    \n    while True:\n#        if bTrigger:\n        print('Send over Ethernet')  # send to console that data is being sent over ethernet\n#        dataToSend = '\\r\\n'  # Clear data\n#        if sDag is not None and sAml0 is not None :  # if both serialstreams have data prepare the message to be sent\n#            dataToSend = '\\r\\n'+sDag +'-'+ sMaand +'-'+ sJaar +',' + sUur + ':'+ sMinuut+ ':' + sSecond + '.' + sMSecond + ',' + sAml2 + ',' + sAml3 + ',' + sAml4 + ',' + sAml5 + ',' + sAml6\n#        getTime()\n#        dataToSend = '\\r\\n'+ datumNu + ',' + sAml2 + ',' + sAml3 + ',' + sAml4 + ',' + sAml5 + ',' + sAml6\n\n        \n        try:  # Try to do following, if there is an error go to except\n#            print (datetime.datetime.now())  # Print to console the message sent to Ethernet\n            global dataToSend \n            print (dataToSend + '\\r\\n')  # Print to console the message sent to Ethernet\n            conn.send(dataToSend.encode())  # Send data through ethernet (after encoding it)\n            clearAml()\n            \n        except socket.error as msg:  # if there is an error print it to console\n            print ('Socket failed. 
Error Code : ' + str(msg[0]) + ' Message ' + msg[1])\n\n        time.sleep(1)  # Wait about a second and repeat\n#        time.sleep(1 - time.time() %1)  # Wait for a second (minus runtime of the code) and repeat\n\n\n#//////////////////////////////////// Ethernet connection setup ///////////////////////////////////////\n\nserversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # Create an ethernet socket\n#bind the socket to a public host,\n# and a well-known port\ntry:\n    serversocket.bind(('', 5001))  # Bind the socket to port 5001\n    print ('Binding ' + socket.gethostname())  # Print \"binding 'hostname' \" to show it is ready for connection \nexcept socket.error as msg:  # if there is an error do following\n    print ('Bind failed. Error Code : ' + str(msg) )  # print the error to console\n    #sys.exit() \n    #become a server socket\nserversocket.listen(5)  # set as passive socket\n\n\n#//////////////////////////////////// Serial loop ////////////////////////////////////////////////////\n\n#Start thread serial 1 ZDA Reader\nthrZda = threading.Thread(name='serZdaReader', target=serZdaReader)  # Create a thread for serial communication (thrZDA) \nthrZda.start()  # Start said thread\n\n#Start thread serial 2 AML Reader\nthrAml = threading.Thread(name='serAmlReader', target=serAmlReader)  # Create a thread for serial communication (thrAML) \nthrAml.start()  # Start said thread\n\n\n#////////////////////////////////// Ethernet Loop /////////////////////////////////////////////////////\n\nwhile 1:  # do forever \n    conn, addr = serversocket.accept()  # wait to accept a connection - blocking call\n    print ('Connected with ' + addr[0] + ':' + str(addr[1]))  # Print confirmation of an ethernet connection by showing IP address and port\n    \n    tSock = threading.Thread(target=socketWriter, args=(conn,))  # Create a thread for Ethernet communication (pass the callable and its argument separately)\n    tSock.start()  # Start said thread\n    tSock.join()  # wait for the thread to finish\n","sub_path":"Backup/timer0_4.py","file_name":"timer0_4.py","file_ext":"py","file_size_in_byte":18706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"91279337","text":"import os\nimport pygame\n\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\nclass snake():\n    def __init__(self, length=5, headpos=(5, 5), direction = 'right'):\n        self.length = length\n        self.headpos = headpos\n        self.direction = direction\n\n        self.snakebody = [headpos]\n        temp_x = headpos[0]\n        temp_y = headpos[1]\n\n        for i in range(self.length-1):\n            if direction == 'right':\n                temp_x = temp_x - 1\n            if direction == 'left':\n                temp_x = temp_x + 1\n            if direction == 'up':\n                temp_y = temp_y - 1\n            if direction == 'down':\n                temp_y = temp_y + 1\n            self.snakebody.append([temp_x, temp_y])\n    def draw_self(self,screen):\n        for pos in self.snakebody:\n            pygame.draw.rect(screen, BLACK, (pos[0]*20, pos[1]*20, 20, 20))\n\n","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"259888702","text":"from scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom sports_spiders.vtvspider import VTVSpider, extract_data, get_nodes, extract_list_data\nfrom sports_spiders.vtvspider import get_height, get_weight, get_player_details, \\\n    get_birth_place_id, get_sport_id, get_state, get_country\nimport re\nimport datetime\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nimport MySQLdb\n\n\nPAR_QUERY = \"insert into sports_participants (id, gid, title, 
aka, sport_id, \\\n participant_type, image_link, base_popularity, reference_url, \\\n location_id, created_at, modified_at) \\\n values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, now(), now()) on duplicate key update modified_at = now()\"\n\nPL_QUERY = \"insert into sports_players (participant_id, debut, main_role, \\\n roles, gender, age, height, weight, birth_date, birth_place, birth_place_id, \\\n salary_pop, rating_pop, weight_class, marital_status, \\\n participant_since, competitor_since, created_at, modified_at, display_title, short_title) \\\n values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, \\\n %s, %s, %s, %s, %s, %s, now(), now(), %s, %s) on duplicate key update modified_at = now();\"\n\nMAX_ID_QUERY = 'select id, gid from sports_participants where id in \\\n (select max(id) from sports_participants)'\n\n\n# PL_NAME_QUERY = 'select id from sports_participants where \\\n# title = \"%s\" and game=\"%s\" and participant_type=\"player\"';\n\nPL_NAME_QUERY = 'select P.id from sports_participants P, sports_players PL where P.title=\"%s\" and P.sport_id=\"%s\" and P.id=PL.participant_id and PL.birth_date=\"%s\"'\n\nSK_QUERY = 'select entity_id from sports_source_keys where \\\nentity_type=\"participant\" and source=\"MLB\" and source_key= \"%s\"'\n\n\nGAME = 'baseball'\nPAR_TYPE = 'player'\nBASE_POP = \"200\"\nLOC = '0'\nDEBUT = \"0000-00-00\"\nROLES = ''\nSAL_POP = ''\nRATING_POP = ''\nGENDER = 'male'\nMARITAL_STATUS = ''\nPAR_SINCE = COMP_SINCE = ''\nWEIGHT_CLASS = AKA = ''\n\n\nROLE_MAP = {\"P\": \"Pitcher\", \"C\": \"Catcher\",\n \"1B\": \"Infielder\",\n \"2B\": \"Infielder\", \"3B\": \"Infielder\",\n \"SS\": \"Infielder\", 'Bullpen Catcher': 'Bullpen Catcher',\n \"LF\": \"Outfielder\", \"CF\": \"Outfielder\", \"RF\": \"Outfielder\",\n \"DH\": \"Designated Hitter\", \"OF\": \"Outfielder\",\n 'infielder': 'Infielder'}\n\nSTATES_DICT = {'CO': 'Colorado', 'TX': 'Texas',\n 'AL': 'Alabama', 'MI': 'Michigan',\n 'PA': 'Pennsylvania', 'MO': 'Missouri',\n 'NC': 'North Carolina', 'FL': 'Florida',\n 'OK': 'Oklahoma', 'CA': 'California',\n 'IN': 'Indiana', 'IL': 'Illinois',\n 'MA': 'Massachusetts', 'NY': 'New York',\n 'CT': 'Connecticut', 'TN': 'Tennessee',\n 'OH': 'Ohio', 'AR': 'Arkansas', 'OR': 'Oregon',\n 'VA': 'Virginia', 'WA': 'Washington', 'SC': 'South Carolina',\n 'LA': 'Louisiana', 'NV': 'Nevada', 'NJ': 'New Jersey',\n 'KY': 'Kentucky', 'MN': 'Minnesota', 'GA': 'Georgia',\n 'KS': 'Kansas', 'MD': 'Maryland', 'AZ': 'Arizona',\n 'SD': 'South Dakota', 'MS': 'Mississippi',\n 'NE': 'Nebraska', 'BC': 'British Columbia',\n 'DE': 'Delaware', 'HI': 'Hawaii', 'ND': 'North Dakota',\n 'VI': 'United States Virgin Islands',\n 'NH': 'New Hampshire', 'WI': 'Wisconsin', 'IA': 'Iowa',\n 'NM': 'New Mexico', 'ID': 'Idaho', 'WY': 'Wyoming',\n 'AK': 'Alaska', 'ME': 'Maine', 'DC': 'District of Columbia',\n 'ON': 'Ontario', 'WV': 'West Virginia', 'RI': 'Rhode Island'}\n\n\nclass MlbPlayers(VTVSpider):\n name = \"mlb_players\"\n start_urls = ['http://mlb.mlb.com/mlb/players/index.jsp']\n player_ref_url = 'http://m.mlb.com/player/%s/%s'\n\n def __init__(self):\n #self.conn = MySQLdb.connect(host=\"10.28.216.45\", user=\"veveo\", passwd='veveo123', db=\"SPORTSDB_DEV\", charset='utf8', use_unicode=True)\n self.conn = MySQLdb.connect(host=\"10.28.218.81\", user=\"veveo\",\n passwd='veveo123', db=\"SPORTSDB\", charset='utf8', use_unicode=True)\n self.cursor = self.conn.cursor()\n\n def check_player(self, pl_sk):\n self.cursor.execute(SK_QUERY % pl_sk)\n entity_id = self.cursor.fetchone()\n if entity_id:\n 
pl_exists = True\n pl_id = str(entity_id[0])\n else:\n pl_exists = False\n pl_id = ''\n return pl_exists, pl_id\n\n def add_source_key(self, entity_id, _id):\n if _id and entity_id:\n query = \"insert into sports_source_keys (entity_id, entity_type, \\\n source, source_key, created_at, modified_at) \\\n values(%s, %s, %s, %s, now(), now()) on duplicate key update modified_at = now()\"\n values = (entity_id, 'participant', 'MLB', _id)\n\n self.cursor.execute(query, values)\n\n def check_title(self, name, dob):\n self.cursor.execute(PL_NAME_QUERY % (name, '1', dob))\n pl_id = self.cursor.fetchone()\n return pl_id\n\n def parse(self, response):\n hxs = Selector(response)\n nodes = get_nodes(hxs, '//select[@id=\"ps_team\"]/option')\n for node in nodes:\n link = extract_data(node, './@value').strip()\n team_name = extract_data(node, './text()').strip()\n if \"Team Rosters\" in team_name:\n continue\n if \"http:\" not in link:\n continue\n yield Request(link, callback=self.parse_listing,\n meta={'team_name': team_name})\n\n def parse_listing(self, response):\n hxs = Selector(response)\n nodes = get_nodes(\n hxs, '//table[@class=\"data roster_table\"]/tbody/tr//a')\n if not nodes:\n nodes = get_nodes(\n hxs, '//table[@class=\"data roster_table\"]/tbody/tr/td/a')\n last_node = nodes[-1]\n participants = {}\n for node in nodes:\n terminal_crawl = False\n if node == last_node:\n terminal_crawl = True\n player_link = extract_data(node, './@href')\n pl_link = response.url.replace('/roster/40-man', '') + player_link\n if not player_link:\n continue\n player_id = pl_link.split('/')[-2].strip()\n pl_exists, pl_id = self.check_player(player_id)\n player_link = \"http://mlb.mlb.com/lookup/json/named.player_info.bam?sport_code='mlb'&player_id=%s\" % (\n player_id)\n yield Request(player_link, self.parse_playeradd, meta={'pl_exists': pl_exists, 'pl_id': pl_id})\n\n coaches = extract_list_data(\n hxs, '//a[contains(text(), \"Coaching Staff\")]/@href')\n if not coaches:\n coaches = extract_list_data(\n hxs, '//a[contains(text(), \"Coaches\")]/@href')\n if coaches:\n coaches = coaches[0]\n if 'htpp' not in coaches:\n coaches = response.url.split('/roster')[0] + coaches\n yield Request(coaches, self.parse_coaches)\n\n def parse_coaches(self, response):\n hxs = Selector(response)\n nodes = get_nodes(hxs, '//tbody[@class=\"coaches\"]/tr')\n if not nodes:\n nodes = get_nodes(hxs, '//table[@class=\"data roster_table\"]//tr')\n for node in nodes:\n link = extract_data(node, './/td//a//@href')\n data = extract_list_data(node, './/td//text()')\n if not data:\n continue\n type_ = data[-1]\n pl_number = data[0].replace('\\\\u2014', '').strip()\n if link: # and 'coach' in type_.lower():\n pl_sk = re.findall('\\d+', link)[0]\n pl_exists, pl_id = self.check_player(pl_sk)\n player_link = \"http://mlb.mlb.com/lookup/json/named.player_info.bam?sport_code='mlb'&player_id=%s\" % (\n pl_sk)\n yield Request(player_link, self.parse_playeradd, meta={'pl_id': pl_id, 'pl_exists': pl_exists})\n\n def parse_playeradd(self, response):\n raw_data = response.body\n data = eval(raw_data)\n p_info = data['player_info']['queryResults']['row']\n player_number = p_info['jersey_number']\n role = p_info['primary_position_txt']\n if role in ROLE_MAP:\n role = ROLE_MAP[role]\n\n sourcekey = p_info['player_id']\n team_callsign = p_info['team_abbrev']\n player_name = p_info['name_display_first_last']\n dob = p_info['birth_date'].replace('T', ' ').split(' ')[0].strip()\n try:\n dob = str(datetime.datetime.strptime(dob, '%Y-%m-%d'))\n except:\n dob = 
''\n b_place = p_info['birth_city']\n b_state = p_info['birth_state']\n if len(b_state) == 2:\n if STATES_DICT.get(b_state, ''):\n b_state = STATES_DICT[b_state]\n else:\n print((b_state, response.url))\n b_country = p_info['birth_country']\n if not b_state:\n b_state = get_state(city=b_place, country=b_country)\n if not b_country:\n b_country = get_country(city=b_place, state=b_state)\n\n birth_place = ', '.join(\n [place.strip() for place in [b_place, b_state, b_country] if place.strip()])\n weight = p_info['weight']\n feet = p_info['height_feet']\n inches = p_info['height_inches']\n main_role = ''\n age = p_info['age']\n debut_date = p_info.get('pro_debut_date', '').replace('T', ' ')\n pl_id = self.check_title(player_name, dob)\n if inches == '0' and feet == '0':\n inches = ''\n feet = ''\n elif feet == '0' and not inches:\n feet = ''\n if weight == '0':\n weight = ''\n height = get_height(feet, inches)\n weight = get_weight(weight)\n ref_url = self.player_ref_url % (\n sourcekey, player_name.replace(' ', '-').lower())\n loc_id = get_birth_place_id(b_place, b_state, b_country)\n\n if response.meta['pl_exists'] == True:\n details = {'age': age, 'birth_place': birth_place,\n 'height': height, 'weight': weight,\n 'debut_date': debut_date,\n 'ref_url': ref_url, 'loc_id': loc_id}\n\n pl_id = response.meta['pl_id']\n get_player_details(details, pl_id)\n else:\n if pl_id:\n self.add_source_key(str(pl_id[0]), sourcekey)\n print((\"Added sk\", player_name))\n else:\n img = 'http://mlb.mlb.com/images/players/mugshot/ph_%s.jpg' % (\n sourcekey)\n self.cursor.execute(MAX_ID_QUERY)\n pl_data = self.cursor.fetchall()\n max_id, max_gid = pl_data[0]\n next_id = max_id + 1\n next_gid = 'PL' + str(int(max_gid.replace('TEAM', '').\n replace('PL', '')) + 1)\n sport_id = get_sport_id(GAME)\n values = (next_id, next_gid, player_name, AKA, sport_id, PAR_TYPE, img,\n BASE_POP, ref_url, loc_id)\n self.cursor.execute(PAR_QUERY, values)\n\n values = (next_id, debut_date, role, ROLES, GENDER,\n age, height, weight, dob, birth_place, loc_id, SAL_POP, RATING_POP,\n WEIGHT_CLASS, MARITAL_STATUS, PAR_SINCE, COMP_SINCE, '', '')\n\n self.cursor.execute(PL_QUERY, values)\n\n self.add_source_key(next_id, sourcekey)\n print((\"Added player\", player_name))\n","sub_path":"SPORTS/sports_spiders/sports_spiders/spiders/mlb_players.py","file_name":"mlb_players.py","file_ext":"py","file_size_in_byte":11729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"459229233","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('stocklist', '0002_auto_20151020_2026'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='Picks',\n new_name='Pick',\n ),\n ]\n","sub_path":"stocklist/migrations/0003_auto_20151020_2029.py","file_name":"0003_auto_20151020_2029.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"113228901","text":"import PySimpleGUI as sg\nfrom models.film import Film\nfrom models.vertoning import Vertoning\nfrom models.ticket import Ticket\nfrom layouts.gui_utils import get_img_data\n\n\ndef create_layout_detail():\n\n poster = sg.Image(data=\"\", key=\"-poster-\")\n\n layout = [\n [\n sg.Column([[poster]]),\n sg.Column([\n [sg.Text(\"\", size=(50, 1), font=\"Helvetica 24\",\n key=\"-titel-\")],\n [\n sg.Text(\"\", size=(8, 1), 
font=\"Helvetica 14\",\n key=\"-duur-\")\n ],\n [sg.Text(\"KINDEREN NIET TOEGELATEN\", font=\"Helvetica 14\", text_color=\"red\",\n key=\"-knt-\", visible=False, background_color=\"white\")],\n [sg.Text(\"\", size=(50, 10), font=\"Helvetica 18\",\n key=\"-beschrijving-\")],\n\n [\n sg.Column([\n [sg.Listbox([], size=(50, 5), font=\"Helvetica 18\", enable_events=True,\n key=\"-vertoningen-\", pad=(10, 10))],\n [sg.Button(\"Terug naar films\", font=\"Helvetica 16\", key=\"-terug_naar_films-\"), sg.Button(\n \"Vertoning kiezen\", disabled=True, font=\"Helvetica 16\", pad=((310, 0), (0, 0)), key=\"-naar_kopen-\")]\n ], key=\"-c_vertoningen-\"),\n\n sg.Frame(\"Tickets\", [\n [\n sg.Text(\"Aantal volwassenen: \",\n font=\"Helvetica 18\", pad=((20, 7), (10, 0))),\n sg.Spin(values=[i for i in range(\n 999)], font=\"Helvetica 18\", enable_events=True, key=\"-volwassenen-\", pad=((0, 108), (10, 0))),\n sg.Text(\"Aantal kinderen: \", font=\"Helvetica 18\",\n visible=True, key=\"-label_kinderen-\", pad=((0, 7), (10, 0))),\n sg.Spin(values=[i for i in range(\n 999)], font=\"Helvetica 18\", enable_events=True, key=\"-kinderen-\", visible=True, pad=((0, 10), (10, 0)))\n ],\n [\n sg.Text(\"Prijs: €\", font=\"Helvetica 18\",\n pad=((20, 7), (10, 0))),\n sg.Text(\"0.00\", font=\"Helvetica 18\",\n key=\"-prijs-\", pad=((0, 0), (10, 0)))\n ],\n [sg.Button(\"Andere vertoning\", font=\"Helvetica 16\", key=\"-terug_naar_vertoningen-\", pad=((20, 0), (10, 10))), sg.Button(\n \"Tickets kopen\", disabled=True, font=\"Helvetica 16\", pad=((260, 20), (10, 10)), key=\"-koop_tickets-\")]\n ], key=\"-c_kopen-\", visible=False),\n ]\n ], pad=(40, 0))\n ]\n ]\n\n return layout\n\n\ndef update_layout_detail(window: sg.Window, film: Film, vertoningen: list[Vertoning]):\n # Update poster\n window[\"-poster-\"].update(data=get_img_data(film.get_afbeelding(),\n maxsize=(500, 500), first=True))\n # Update titel\n window[\"-titel-\"].update(value=film.titel)\n # Update duur\n uren = int(film.duur / 60)\n minuten = film.duur % 60\n window[\"-duur-\"].update(value=f\"{uren}u {minuten}min\")\n # Update KNT\n window[\"-knt-\"].update(visible=bool(film.knt))\n window[\"-label_kinderen-\"].update(visible=not bool(film.knt))\n window[\"-kinderen-\"].update(visible=not bool(film.knt))\n # Update beschrijving\n window[\"-beschrijving-\"].update(value=film.beschrijving)\n # Update vertoningen\n window[\"-vertoningen-\"].update(values=vertoningen)\n # Reset disabled \"Vertoning Kiezen\" knop\n window[\"-naar_kopen-\"].update(disabled=True)\n # Reset aantal volwassenen en kinderen\n window[\"-volwassenen-\"].update(value=0)\n window[\"-kinderen-\"].update(value=0)\n\n\ndef update_layout_prijs(window: sg.Window, vertoning: Vertoning):\n totaal_prijs = Ticket.bereken_prijs(vertoning, False) * int(window[\"-volwassenen-\"].get()) \\\n + Ticket.bereken_prijs(vertoning, True) * \\\n int(window[\"-kinderen-\"].get())\n\n window[\"-prijs-\"].update(value=str(totaal_prijs))\n if totaal_prijs > 0:\n window[\"-koop_tickets-\"].update(disabled=False)\n else:\n window[\"-koop_tickets-\"].update(disabled=True)\n","sub_path":"layouts/layout_detail.py","file_name":"layout_detail.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"266350367","text":"# Set up your imports here!\n# import ...\nfrom flask import Flask\n\napp = Flask(__name__)\n\n\n@app.route('/') # Fill this in!\ndef index():\n # Welcome Page\n # Create a generic welcome page.\n return \"Please 
go to /puppy_latin/<name> to get your puppy latin name.\"\n\n\n@app.route('/puppy_latin/<name>')  # Fill this in!\ndef puppylatin(name):\n    # This function will take in the name passed\n    # and then use \"puppy-latin\" to convert it!\n\n    # HINT: Use indexing and concatenation of strings\n    # For Example: \"hello\"+\" world\" --> \"hello world\"\n    if name[-1] == \"y\":\n        name = name[:-1] + \"iful\"\n    else:\n        name += \"y\"\n    return \"Your puppy latin name is {}\".format(name)\n\n\nif __name__ == '__main__':\n    # Fill me in!\n    app.run(debug=True)\n","sub_path":"Section 07 - Flask Basics/8-Routing_Exercise.py","file_name":"8-Routing_Exercise.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"134839498","text":"import os\nimport pickle\nimport numpy as np\nimport math\nfrom PIL import Image\nimport copy\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\nLEN_KPS = 10\nTHRE = 0.5\nthreshold = 0.5\nimg_size = np.array([[256.0, 256.0]], dtype=np.float32)\nHAND_SIDE = 'right'\n\ndef crop_hand(frames_list, ori_img_size, hand_side):\n    frames_list_new = []\n    bbxes = []\n    valid_frame_list = []\n    for i in range(frames_list.shape[0]):\n        frame_data = frames_list[i]\n        skeleton = frame_data[:, 0:2]\n        confidence = frame_data[:, 2]\n        usz, vsz = [ori_img_size[0][0], ori_img_size[0][1]]\n        minsz = min(usz, vsz)\n        maxsz = max(usz, vsz)\n        if hand_side == 'right':\n            right_keypoints = skeleton[112:133, :]\n            kp_visible = (confidence[112:133] > 0.1)\n            uvis = right_keypoints[kp_visible, 0]\n            vvis = right_keypoints[kp_visible, 1]\n        elif hand_side == 'left':\n            left_keypoints = skeleton[91:112, :]\n            kp_visible = (confidence[91:112] > 0.1)\n            uvis = left_keypoints[kp_visible, 0]\n            vvis = left_keypoints[kp_visible, 1]\n        else:\n            raise ValueError('wrong hand side')\n        if len(uvis) < LEN_KPS:\n            bbx = elbow_hand(skeleton, confidence, ori_img_size, hand_side)\n            if bbx is None:\n                continue\n            else:\n                bbxes.append(bbx)\n                frames_list_new.append(frame_data)\n                valid_frame_list.append(i)\n        else:\n            umin = min(uvis)\n            vmin = min(vvis)\n            umax = max(uvis)\n            vmax = max(vvis)\n\n            B = round(2.2 * max([umax - umin, vmax - vmin]))\n\n            us = 0\n            ue = usz - 1\n            vs = 0\n            ve = vsz - 1\n            umid = umin + (umax - umin) / 2\n            vmid = vmin + (vmax - vmin) / 2\n\n            if (B < minsz - 1):\n                us = round(max(0, umid - B / 2))\n                ue = us + B\n                if (ue > usz - 1):\n                    d = ue - (usz - 1)\n                    ue = ue - d\n                    us = us - d\n                vs = round(max(0, vmid - B / 2))\n                ve = vs + B\n                if (ve > vsz - 1):\n                    d = ve - (vsz - 1)\n                    ve = ve - d\n                    vs = vs - d\n            if (B >= minsz - 1):\n                B = minsz - 1\n                if usz == minsz:\n                    vs = round(max(0, vmid - B / 2))\n                    ve = vs + B\n                    if (ve > vsz - 1):\n                        d = ve - (vsz - 1)\n                        ve = ve - d\n                        vs = vs - d\n                if vsz == minsz:\n                    us = round(max(0, umid - B / 2))\n                    ue = us + B\n\n                    if (ue > usz - 1):\n                        d = ue - (usz - 1)\n                        ue = ue - d\n                        us = us - d\n            us = int(us)\n            vs = int(vs)\n            ue = int(ue)\n            ve = int(ve)\n            bbx = [us, ue, vs, ve]\n            bbxes.append(bbx)\n            frames_list_new.append(frame_data)\n            valid_frame_list.append(i)\n\n    bbxes = np.array(bbxes, dtype=np.float32)\n    average_width = np.average(bbxes[:, 1] - bbxes[:, 0])\n    average_height = np.average(bbxes[:, 3] - bbxes[:, 2])\n    rescale_bbx = np.array([average_width, average_height], dtype=np.float32)\n    return frames_list_new, bbxes, rescale_bbx, valid_frame_list\n\n\ndef elbow_hand(pose_keypoints, confidence, ori_img_size, hand_side):\n    right_hand = pose_keypoints[[2, 3, 4]]\n    left_hand = pose_keypoints[[5, 6, 7]]\n    ratioWristElbow = 0.33\n    
detect_result = []\n img_width, img_height = [ori_img_size[0][0], ori_img_size[0][1]]\n if hand_side == 'right':\n has_right = np.sum(confidence[[2, 3, 4]] < THRE) == 0\n if not has_right:\n return None\n x1, y1 = right_hand[0][:2]\n x2, y2 = right_hand[1][:2]\n x3, y3 = right_hand[2][:2]\n\n x = x3 + ratioWristElbow * (x3 - x2)\n y = y3 + ratioWristElbow * (y3 - y2)\n distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2)\n distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n width = 1.1 * max(distanceWristElbow, 0.9 * distanceElbowShoulder)\n x -= width / 2\n y -= width / 2 # width = height\n\n if x < 0: x = 0\n if y < 0: y = 0\n width1 = width\n width2 = width\n if x + width > img_width: width1 = img_width - x\n if y + width > img_height: width2 = img_height - y\n width = min(width1, width2)\n detect_result.append([int(x), int(y), int(width)])\n\n elif hand_side == 'left':\n has_left = np.sum(confidence[[5, 6, 7]] < THRE) == 0\n if not has_left:\n return None\n x1, y1 = left_hand[0][:2]\n x2, y2 = left_hand[1][:2]\n x3, y3 = left_hand[2][:2]\n\n x = x3 + ratioWristElbow * (x3 - x2)\n y = y3 + ratioWristElbow * (y3 - y2)\n distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2)\n distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n width = 1.1 * max(distanceWristElbow, 0.9 * distanceElbowShoulder)\n x -= width / 2\n y -= width / 2 # width = height\n if x < 0: x = 0\n if y < 0: y = 0\n width1 = width\n width2 = width\n if x + width > img_width: width1 = img_width - x\n if y + width > img_height: width2 = img_height - y\n width = min(width1, width2)\n detect_result.append([int(x), int(y), int(width)])\n\n x, y, width = int(x), int(y), int(width)\n return [x, x + width, y, y + width]\n\n\ndef get_kp2ds(skeleton, conf, threshold, hand_side):\n if hand_side == 'left':\n hand_kp2d = skeleton[91:112, :]\n confidence = conf[91:112]\n elif hand_side == 'right':\n hand_kp2d = skeleton[112:133, :]\n confidence = conf[112:133]\n else:\n raise Exception('wrong hand_side type')\n confidence = np.where(confidence > threshold, confidence, 0.0)\n indexes = np.where(confidence < threshold)[0].tolist()\n for i in range(len(indexes)):\n hand_kp2d[indexes[i]] = np.zeros((1, 2), dtype=np.float32)\n confidence = np.tile(confidence[:, np.newaxis], (1, 2))\n return hand_kp2d, confidence\n\n\ndef visual(kp2ds, save_path, img_path=None, img=None):\n if img is None:\n img = Image.open(img_path)\n plt.figure()\n plt.imshow(img)\n for j in range(0, 1):\n if kp2ds[j][0] < 0.0:\n continue\n plt.plot(kp2ds[j][0], kp2ds[j][1], 'bo')\n for j in range(1, 5):\n if kp2ds[j][0] < 0.0:\n continue\n plt.plot(kp2ds[j][0], kp2ds[j][1], 'ro')\n for j in range(5, 9):\n if kp2ds[j][0] < 0.0:\n continue\n plt.plot(kp2ds[j][0], kp2ds[j][1], 'go')\n for j in range(9, 13):\n if kp2ds[j][0] < 0.0:\n continue\n plt.plot(kp2ds[j][0], kp2ds[j][1], 'yo')\n for j in range(13, 17):\n if kp2ds[j][0] < 0.0:\n continue\n plt.plot(kp2ds[j][0], kp2ds[j][1], 'ko')\n for j in range(17, 21):\n if kp2ds[j][0] < 0.0:\n continue\n plt.plot(kp2ds[j][0], kp2ds[j][1], 'mo')\n # joints = [(0, 1), (1, 2), (2, 3), (3, 4),\n # (0, 5), (5, 6), (6, 7), (7, 8),\n # (0, 9), (9, 10), (10, 11), (11, 12),\n # (0, 13), (13, 14), (14, 15), (15, 16),\n # (0, 17), (17, 18), (18, 19), (19, 20)]\n # for j in range(len(joints)):\n # plt.plot([kp2ds[joints[j][0]][0], kp2ds[joints[j][1]][0]],\n # [kp2ds[joints[j][0]][1], kp2ds[joints[j][1]][1]],\n # linewidth=1, color='cyan')\n # plt.show()\n 
plt.savefig(save_path)\n\n\n\n\nroot_img_path = '/data3/alexhu/Datasets/AUTSL_Upper/jpg_video/'\nroot_joint_path = '/data3/alexhu/Datasets/AUTSL_Upper/Keypoints_2d_mmpose/'\nroot_hand_img_path = '/data3/alexhu/Datasets/AUTSL_Upper/jpg_right_hand/'\nvideo_joint_pkl = {}\n\nsplit_list = os.listdir(root_img_path)\nfor split_name in split_list:\n real_split_path = os.path.join(root_img_path, split_name)\n video_list = os.listdir(real_split_path)\n for video_name in tqdm(video_list):\n if video_name.endswith('depth'):\n continue\n print(split_name, video_name)\n\n joint_path = os.path.join(root_joint_path, split_name, video_name+'.pkl')\n with open(joint_path, 'rb') as f:\n all_dict = pickle.load(f)\n video_joints = all_dict['keypoints']\n real_video_path = os.path.join(real_split_path, video_name)\n real_img_path = os.path.join(real_split_path, video_name, '000000.jpg')\n ori_img_size = Image.open(real_img_path).size\n ori_img_size = np.array([[ori_img_size[0], ori_img_size[1]]], dtype=np.float32)\n video_joints_update, bbxes, rescale_bbx, valid_frame_list = crop_hand(video_joints, ori_img_size, HAND_SIDE)\n\n clrs = []\n img_list = sorted(os.listdir(real_video_path))\n img_list.pop(-1)\n if len(img_list) != len(valid_frame_list):\n print(real_video_path)\n frame_dict = {}\n for i in range(len(valid_frame_list)):\n frame_index = '%06d.jpg' %(valid_frame_list[i])\n real_frame_path = os.path.join(real_video_path, frame_index)\n\n frame = video_joints_update[i]\n skeleton = frame[:, 0:2]\n confidence = frame[:, 2]\n kp2ds, confidence = get_kp2ds(skeleton, confidence, threshold, HAND_SIDE)\n\n # bbox x1 x2 y1 y2\n center = [(bbxes[i][1] + bbxes[i][0])//2, (bbxes[i][3] + bbxes[i][2])//2]\n B = int(rescale_bbx[0]) // 2\n if center[0] < B:\n B = center[0]\n elif center[0] + B > ori_img_size[0][0] -1:\n B = ori_img_size[0][0] - 1 - center[0]\n if center[1] < B:\n B = center[1]\n elif center[1] + B > ori_img_size[0][1] -1:\n B = ori_img_size[0][1] - 1 - center[1]\n scale = np.array(\n [[2 * B, 2 * B]],\n dtype=np.float32)\n trans = np.array([[center[0]-B, center[1]-B]], dtype=np.float32)\n # trans = np.array([[bbxes[i][0], bbxes[i][2]]], dtype=np.float32)\n # scale = np.array(\n # [[bbxes[i][1] - bbxes[i][0], bbxes[i][3] - bbxes[i][2]]], # bbox x1 x2 y1 y2\n # dtype=np.float32)\n assert scale[0, 1] > 0.0 and scale[0, 0] > 0.0\n kp2ds = (kp2ds - trans) / scale * img_size\n kp2ds = np.where(kp2ds > 0.0, kp2ds, 0.0)\n gt = copy.deepcopy(kp2ds)\n\n # clr = Image.open(real_frame_path)\n # # clr = clr.crop(\n # # (bbxes[i][0], bbxes[i][2], bbxes[i][1], bbxes[i][3]))\n # clr = clr.crop(\n # (center[0]-B, center[1]-B, center[0]+B, center[1]+B))\n # clr = clr.resize((256, 256))\n # clrs.append(clr)\n\n\n frame_dict[valid_frame_list[i]] = [gt, confidence, [center[0]-B, center[1]-B, center[0]+B, center[1]+B]]\n hand_video_path = os.path.join(root_hand_img_path, split_name, video_name)\n if not os.path.exists(hand_video_path):\n os.makedirs(hand_video_path)\n hand_img_path = os.path.join(hand_video_path, frame_index)\n # visual(gt, hand_img_path, img=clr)\n # clr.save(hand_img_path)\n video_joint_pkl[video_name] = frame_dict\n # exit()\n\nwith open('Joint_right_hand.pkl', 'wb') as f:\n pickle.dump(video_joint_pkl, f)\n","sub_path":"data_arrange/Stage9_Crop_Face.py","file_name":"Stage9_Crop_Face.py","file_ext":"py","file_size_in_byte":11821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"447082112","text":"import unittest\n\nfrom 
ingenico.connect.sdk.param_request import ParamRequest\nfrom tests.unit.comparable_param import ComparableParam\n\n\nclass AbstractParamRequestTest(unittest.TestCase):\n    \"\"\"Contains tests that test if the AbstractParamRequest class accepts the proper parameter types and refuses others\n    \"\"\"\n\n    def test_add_parameter_string(self):\n        \"\"\"Tests that an AbstractParamRequest accepts a string as value\"\"\"\n        name = \"foo\"\n        value = \"false\"\n        listvar = []\n        reflist = [ComparableParam(name, value)]\n        ParamRequest()._add_parameter(listvar, name, value)\n        self.assertCountEqual(reflist, listvar)\n\n    def test_add_parameter_int(self):\n        \"\"\"Tests that an AbstractParamRequest accepts an int as value\"\"\"\n        name = \"foo\"\n        value = 1234567890\n        listvar = []\n        reflist = [ComparableParam(name, value.__str__())]\n        ParamRequest()._add_parameter(listvar, name, value)\n        self.assertCountEqual(reflist, listvar)\n\n    def test_add_parameter_longer_int(self):\n        \"\"\"Tests that an AbstractParamRequest accepts a longer int as value\"\"\"\n        name = \"foo\"\n        value = 1234567890987654321\n        listvar = []\n        reflist = [ComparableParam(name, value.__str__())]\n        ParamRequest()._add_parameter(listvar, name, value)\n        self.assertCountEqual(reflist, listvar)\n\n    def test_add_parameter_bool(self):\n        \"\"\"Tests that an AbstractParamRequest accepts a boolean as value\"\"\"\n        name = \"bar\"\n        value = False\n        listvar = []\n        reflist = [ComparableParam(name, value.__str__())]\n        ParamRequest()._add_parameter(listvar, name, value)\n        self.assertCountEqual(reflist, listvar)\n\n    def test_add_parameter_string_list(self):\n        \"\"\"Tests that an AbstractParamRequest accepts a list with a single string as value\"\"\"\n        name = \"foolist\"\n        value = [\"foo\"]\n        listvar = []\n        reflist = [ComparableParam(name, value[0].__str__())]\n        ParamRequest()._add_parameter(listvar, name, value)\n        self.assertCountEqual(reflist, listvar)\n\n    def test_add_parameter_int_list(self):\n        \"\"\"Tests that an AbstractParamRequest accepts a list of a single int as value\"\"\"\n        name = \"foolist\"\n        value = [1337]\n        listvar = []\n        reflist = [ComparableParam(name, str(value[0]))]\n        ParamRequest()._add_parameter(listvar, name, value)\n        self.assertCountEqual(reflist, listvar)\n\n    def test_add_parameter_bool_list(self):\n        \"\"\"Tests that an AbstractParamRequest accepts a list of two booleans as value\"\"\"\n        name = \"barfoo\"\n        value = [False, True]\n        listvar = []\n        reflist = [ComparableParam(name, value[0].__str__()),\n                   ComparableParam(name, value[1].__str__())]\n        ParamRequest()._add_parameter(listvar, name, value)\n        try:\n            self.assertListEqual(reflist, listvar)\n        except AttributeError:\n            print(type(listvar[1]))\n            print(type(reflist[1]))\n            self.fail()\n\n    def test_add_parameter_float(self):\n        \"\"\"Tests that an AbstractParamRequest refuses a float as value and responds with an appropriate error\"\"\"\n        name = \"bar\"\n        value = 1.9999999999\n        listvar = []\n        self.assertRaises(ValueError, ParamRequest()._add_parameter, listvar,\n                          name, value)\n\n    def test_add_string_list_list(self):\n        \"\"\"Tests that an AbstractParamRequest refuses a nested list as value and responds with an appropriate error\"\"\"\n        name = \"bar\"\n        value = [[\"foo\"]]\n        listvar = []\n        self.assertRaises(ValueError, ParamRequest()._add_parameter, listvar,\n                          name, value)\n\n\nif __name__ == '__main__':\n    
unittest.main()\n","sub_path":"tests/unit/test_param_request.py","file_name":"test_param_request.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"189979642","text":"# -*- coding: utf-8 -*-\nfrom zope import schema\n\nfrom plone.namedfile.field import NamedBlobImage\n\nfrom bika.lims import messagefactory as _\nfrom bika.lims.interfaces.organisation import IOrganisation\nfrom plone.supermodel import model\n\n\nclass ILaboratory(IOrganisation):\n    \"\"\"Lab Client\n    \"\"\"\n\n    title = schema.TextLine(\n        title=_(u\"Name\"),\n        required=True,\n    )\n\n    model.fieldset('accreditation',\n                   label=_(u\"Accreditation\"),\n                   fields=['confidence',\n                           'accredited',\n                           'accreditation_body',\n                           'accreditation_body_url',\n                           'accreditation',\n                           'accreditation_reference',\n                           'accreditation_body_logo',\n                           'accreditation_page_header',\n                           ]\n                   )\n    accredited = schema.Bool(\n        title=_(u\"Laboratory Accredited\"),\n        description=_(u\"Check this box if your laboratory is accredited\"),\n        required=False\n    )\n\n    confidence = schema.Int(\n        title=_(u\"Confidence Level %\"),\n        description=_(u\"This value is reported at the bottom of all \"\n                      u\"published results\"),\n        required=False\n    )\n\n    accreditation_body = schema.TextLine(\n        title=_(u\"Accreditation Body\"),\n        description=_(u\"Name of accreditation body, e.g. SANAS, APLAC, etc.\"),\n        required=False\n    )\n\n    accreditation_body_url = schema.URI(\n        title=_(u\"Accreditation Body URL\"),\n        description=_(u\"Web address for the accreditation body\"),\n        required=False\n    )\n\n    accreditation = schema.TextLine(\n        title=_(u\"Accreditation\"),\n        description=_(u\"The accreditation standard that applies, \"\n                      u\"e.g. ISO 17025\"),\n        required=False\n    )\n\n    accreditation_reference = schema.URI(\n        title=_(u\"Accreditation Reference\"),\n        description=_(u\"The reference code issued to the lab by the \"\n                      u\"accreditation body\"),\n        required=False\n    )\n\n    accreditation_body_logo = NamedBlobImage(\n        title=_(u\"Accreditation Logo\"),\n        description=_(\n            u\"Please upload the logo you are authorised to use on your \"\n            u\"website and results reports by your accreditation body.\"),\n        required=False\n    )\n\n    accreditation_page_header = schema.SourceText(\n        title=_(u\"Accreditation page header\"),\n        description=_(\n            u\"Enter the details of your lab's service accreditations here. \"\n            u\"The following fields are available:&#10;\"\n            u\"&bull; lab_name&#10;\"\n            u\"&bull; lab_country&#10;\"\n            u\"&bull; confidence&#10;\"\n            u\"&bull; accreditation_body&#10;\"\n            u\"&bull; standard&#10;\"\n            u\"&bull; reference&#10;\"),\n        default=u\"&#10;&#10;{lab_name} has been accredited as {standard} conformant \"\n            u\"by {accreditation_body} ({reference}).&#10;&#10;\"\n            u\"&#10;&#10;{accreditation_body} is the single national accreditation \"\n            u\"body assessing testing and calibration laboratories for \"\n            u\"compliance to the ISO/IEC 17025 standard.&#10;&#10;\",\n        required=False\n    )\n","sub_path":"src/bika/lims/interfaces/laboratory.py","file_name":"laboratory.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"509199151","text":"from abc import ABC,abstractmethod\r\nimport sys\r\nfrom random import*\r\ndef accnumber():\r\n    digits='0123456789'\r\n    accnu=''\r\n    for i in range(12):\r\n        accnu=accnu+choice(digits)\r\n    return accnu\r\nclass Account(ABC):\r\n    bankname='TCC Corp'\r\n    statement=[]\r\n    tcount=0\r\n    def __init__(self,name,accnum,bal):\r\n        self.name=name\r\n        self.accnum=accnumber()\r\n        self.bal=bal\r\n    def _deposit(self,amt):\r\n        self.bal=self.bal+amt\r\n        print('avail balance:',self.bal)\r\n        Account.statement.append('Amount credited {} : updated balance {}'. format(amt,self.bal))\r\n    def _withdraw(self,amt,min_bal):\r\n        while Account.tcount>5:\r\n            print('Your max number of attempts reached {}, please try after 24hrs'. format(self.name))\r\n            sys.exit()\r\n        if amt%100!=0:\r\n            print(\"Please enter amount only in multiples of '100'\")\r\n        elif(amt>self.bal):\r\n            print('Insufficient funds :')\r\n        elif (self.bal-amt)10000:\r\n                print('Please do not exceed Transaction limit')\r\n                amt=abs(float(input('enter Withdrawal amount:')))\r\n            self._withdraw(amt,0)\r\n        except ValueError:\r\n            print('Please enter amount in numbers only')\r\n    def balenquiry(self):\r\n        print('Balance in your savings account with acc num xxxxxxxxx{} is {} rs.'. format(self.accnum[9:],self.bal))\r\n    def getaccountinfo(self):\r\n        print('Name    : ',self.name)\r\n        print('Account Number : ',self.accnum)\r\n        print('Account type  : Savings Account')\r\n    def history(self):\r\n        self._history()\r\nclass Currentaccount(Account):\r\n    def __init__(self,name):\r\n        super().__init__(name,accnum='',bal=0,)\r\n    def deposit(self):\r\n        try:\r\n            amt=abs(float(input('Enter deposit amount:')))\r\n            self._deposit(amt)\r\n        except:\r\n            print('Please enter amount in numbers only')\r\n    def withdraw(self):\r\n        amt=abs(float(input('Enter Withdrawal amount:')))\r\n        while amt==0 or amt>10000:\r\n            print('Please do not exceed Transaction limit')\r\n            amt=abs(float(input('Enter Withdrawal amount:')))\r\n        self._withdraw(amt,1000)\r\n    def balenquiry(self):\r\n        print('Balance in your current account with acc num xxxxxxxxx{} is {} rs.'. format(self.accnum[9:],self.bal))\r\n    def getaccountinfo(self):\r\n        print('Name    : ',self.name)\r\n        print('Account Number : ',self.accnum)\r\n        print('Account type  : Current Account')\r\n    def history(self):\r\n        self._history()\r\nprint('Welcome to,',Account.bankname)\r\nprint('The page you are viewing is to create an account in {}, please follow the instructions'. 
format(Account.bankname))\r\nname=input('Enter your name:')\r\nprint('S - Savings account\\nC - Current account')\r\noption=input('Enter your option from above : ').lower()\r\nwhile option not in ['s','c']:\r\n    option=input('Please enter a valid option from S or C :')\r\nif option=='s':\r\n    a=Savingsaccount(name)\r\n    print('Account number :',a.accnum)\r\nelse:\r\n    a=Currentaccount(name)\r\n    print('Account number :',a.accnum)\r\nwhile True:\r\n    print('D : deposit\\nW : withdrawal\\nB : balance enquiry\\nS : Statement\\ni : Account info\\nE : exit transaction')\r\n    option=input('Choose any option:').lower()\r\n    while option not in ('d','w','b','s','i','e'):\r\n        option=input('Please choose a valid option from above : ').lower()    \r\n    if option=='d':\r\n        a.deposit()\r\n        print('Money deposited successfully')\r\n    elif option=='w':\r\n        a.withdraw()\r\n    elif option=='b':\r\n        a.balenquiry()\r\n    elif option=='i':\r\n        a.getaccountinfo()\r\n    elif option=='s':\r\n        a.history()\r\n    elif option=='e':\r\n        print('terminating...')\r\n        print('Thank you,', a.name)\r\n        sys.exit()\r\n\r\n\r\n\r\n\r\n    \r\n    \r\n\r\n    \r\n        \r\n    \r\n    \r\n    \r\n    \r\n    \r\n    \r\n","sub_path":"banking app.py","file_name":"banking app.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"278390576","text":"#!/usr/bin/python3\n\nimport string\n\nwheel_count = 3\n\nwheels = [(\"EKMFLGDQVZNTOWYHXUSPAIBRCJ\", \"R\"),\n          (\"AJDKSIRUXBLHWTMCQGZNPYFVOE\", \"F\"),\n          (\"BDFHJLCPRTXVZNYEIWGAKMUSQO\", \"W\"),\n          (\"ESOVPZJAYQUIRHXLNFTGKDCMWB\", \"K\"),\n          (\"VZBRGITYUPSDNHLXAWMJQOFECK\", \"A\"),\n          (\"JPGVOUMFYQBENHZRDKASXLICTW\", \"AN\"),\n          (\"NZJHGRCXMYSWBOUFAIVLPEKQDT\", \"AN\"),\n          (\"FKQHTLXOCBJSPDZRAMEWNIUYGV\", \"AN\")]\n\nreflectors = [\"EJMZALYXVBWFCRQUONTSPIKHGD\",\n              \"YRUHQSLDPXNGOKMIEBFZCWVJAT\",\n              \"FVPJIAOYEDRZXWGCTKUQSBNMHL\"]\n\nclass Wheel:\n    \"\"\"A wheel class to hold the input and output strings,\n    with the ability to rotate its string an arbitrary\n    amount, and to encode and reverse encode a character\"\"\"\n\n    def __init__(self, sequence, notches):\n        self.input = string.ascii_uppercase\n        self.output = sequence\n        self.notches = notches\n\n    def rotate(self, amount = 1):\n        \"\"\"Simulate a cylinder rotating\"\"\"\n        self.input = self.input[amount:] + self.input[:amount]\n        self.output = self.output[amount:] + self.output[:amount]\n\n    def notch_active(self):\n        \"\"\"If the first character of the input string is one of the\n        characters in the self.notches string, return True, else False\"\"\"\n        return self.input[0] in self.notches\n\n    def encode(self, char, reverse=False):\n        temp = self.input[string.ascii_uppercase.index(char)]\n        if not reverse:\n            temp = self.output[self.input.index(temp)]\n        else:\n            temp = self.input[self.output.index(temp)]\n        temp = string.ascii_uppercase[self.input.index(temp)]\n        return temp    \n\ndef get_wheel_choices():\n    \"\"\"Gets a list of 0-based integers which is wheel_count items \n    long from the user.  
Validates input, and re-asks if the user\n    gives a nonsense answer\"\"\"\n\n    wheel_choices = []\n\n    while len(wheel_choices) < wheel_count:\n        choice_number = len(wheel_choices) + 1\n        choice = input(\"Please type a wheel number for slot %d, 1-8: \" % choice_number)\n\n        if len(choice) == 1 and choice in string.digits and int(choice) in range(1,9):\n            wheel_choices.append(int(choice)-1)\n        else:\n            print(\"That was an invalid choice\")\n\n    return wheel_choices\n\ndef get_wheel_settings():\n    \"\"\"Asks the user for a-z choices for wheel settings, returning a list of\n    wheel_count length, with 0-based integers denoting the a-z positions\n    chosen by the user\"\"\"\n\n    wheel_settings = []\n    \n    while len(wheel_settings) < wheel_count:\n        choice_number = len(wheel_settings) + 1\n        choice = input(\"Please type a setting for wheel %d, a-z: \" % choice_number)\n\n        choice = choice.upper()\n        if len(choice) == 1 and choice in string.ascii_uppercase:\n            wheel_settings.append(string.ascii_uppercase.index(choice))\n        else:\n            print(\"That was an invalid choice\")\n\n    return wheel_settings\n\ndef get_reflector():\n    \"\"\"Asks the user for reflector a, b, or c, and returns 0, 1, or 2\"\"\"\n\n    while True:\n        choice = input(\"Please choose a reflector, a, b, or c: \")\n        if len(choice) == 1 and choice in \"abc\":\n            return \"abc\".index(choice)\n        else:\n            print(\"That was an invalid choice\")\n\ndef get_message():\n    \"\"\"Get the user's input message. Don't allow characters other than a-z\"\"\"\n\n    while True:\n        message = input(\"Please enter the message to encrypt: \").upper()\n        for char in message:\n            if char not in string.ascii_uppercase:\n                print(\"That was an invalid message. Only characters a-z are supported\")\n                break\n        else:\n            return message\n\ndef encrypt(working_wheels, wheel_settings, working_reflector, message):\n    for i in range(len(working_wheels)):\n        w = working_wheels[i]\n        s = wheel_settings[i]\n        w.rotate(s)\n\n    # Now for the fun bit\n    encrypted = []\n    for char in message:\n        #Rotate wheels if the notches are active\n        working_wheels[2].rotate()\n        if working_wheels[2].notch_active():\n            working_wheels[1].rotate()\n            if working_wheels[1].notch_active():\n                working_wheels[0].rotate()\n\n        #FIXME Plugboard is missing entirely\n\n        #Encode with each wheel, in reverse order\n        for i in range(len(working_wheels)-1, -1, -1):\n            char = working_wheels[i].encode(char)\n        \n        #Encode with the reflector\n        char = working_reflector[string.ascii_uppercase.index(char)]\n\n        #\"Reverse\" Encode with each wheel in standard order\n        for i in range(len(working_wheels)):\n            char = working_wheels[i].encode(char, reverse=True)\n\n        #FIXME Plugboard would happen here too\n\n        #Save the encrypted character\n        encrypted.append(char)\n\n    return \"\".join(encrypted)\n\ndef main():\n    \"\"\"Gather the user's wheel choices, settings, reflector choice and message,\n    encrypt the message, and print the result to the screen\"\"\"\n    wheel_choices = get_wheel_choices()\n    wheel_settings = get_wheel_settings()\n    reflector_choice = get_reflector()\n    message = get_message()\n\n    # wheel_choices is a list of ints, turn it into a list of tuples. 
Each\n    # tuple has the connections (relative to a standard alphabet) in string format,\n    # and the notch positions as a second string in the tuple\n    working_wheels = [Wheel(wheels[i][0], wheels[i][1]) for i in wheel_choices]\n\n    # Take the integer reflector_choice and make us a working copy of that reflector\n    working_reflector = reflectors[reflector_choice]\n\n    # The meat and potatoes\n    encrypted = encrypt(working_wheels, wheel_settings, working_reflector, message)\n\n    print(\"Your encrypted text:\", encrypted)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Enigma.py","file_name":"Enigma.py","file_ext":"py","file_size_in_byte":5812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"183738391","text":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport numpy as np\nimport os\nimport os.path as osp\nimport sys\nimport argparse\n\nsys.path.insert(0, \"./FairMOT/src\")\nimport lib.datasets.dataset.jde as datasets\n\ndef process(data_root, seqs, output_dir):\n    for seq in seqs:\n        print(\"process the \" + osp.join(data_root, seq, 'img1') + \" files\")\n        if not os.path.exists(output_dir):\n            os.mkdir(output_dir)\n        img_size = (1088, 608)\n        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), img_size)\n        for i, (path, img, img0) in enumerate(dataloader):\n            blob = torch.from_numpy(img).unsqueeze(0)\n            blob = np.array(blob).astype(np.float32)\n            blob.tofile(osp.join(output_dir, seq + \"_\"+\"{:0>6d}\".format(i)+\".bin\"))    \n\nif __name__ == \"__main__\":\n\n    parse = argparse.ArgumentParser()\n    parse.add_argument(\"--data_root\", type=str, default=\"./dataset\")\n    parse.add_argument(\"--output_dir\", type=str, default=\"./pre_dataset\")\n    args = parse.parse_args()\n    \n\n    seqs_str = '''MOT17-02-SDP\n                  MOT17-04-SDP\n                  MOT17-05-SDP\n                  MOT17-09-SDP\n                  MOT17-10-SDP\n                  MOT17-11-SDP\n                  MOT17-13-SDP'''\n    \n    data_dir = args.data_root\n    output_dir = args.output_dir\n    data_root = os.path.join(data_dir, 'MOT17/images/train')\n    seqs = [seq.strip() for seq in seqs_str.split()]\n    process(data_root, seqs, output_dir)","sub_path":"ACL_PyTorch/contrib/cv/tracking/FairMOT/fairmot_preprocess.py","file_name":"fairmot_preprocess.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"186696564","text":"#from EmulatorGUI import GPIO\nimport RPi.GPIO as GPIO\nimport time\nimport threading\nfrom random import randint\n\nprint (\"RASPI ASYNC LEDS BY KONEY\")\nkeep_executing=True\nsleep_micro=0.01\nsleep_minimum=0.02\nsleep_shorter=0.06\nsleep_short=0.07\nsleep_medium=0.10\nsleep_long=1.1\nsleep_longer=2.4\nsleep_maximum=4\n\n#IO_ports=[14,15,18,23,24,25,8,7,11,9,10,22,27,17,4,3,2]\n# MAPPING PHYSICAL PORTS TO LEDS\ntower_top=14\nfront_light=15\nside_red=[9,11,10]\nside_green=[23,25,24]\nbi_led_red=17\nbi_led_green=18\n\n#### SETUP 
#################\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(tower_top,GPIO.OUT)\nGPIO.output(tower_top,True)\nGPIO.setup(bi_led_red,GPIO.OUT)\nGPIO.output(bi_led_red,True)\nGPIO.setup(bi_led_green,GPIO.OUT)\nGPIO.output(bi_led_green,True)\nGPIO.setup(front_light,GPIO.OUT)\nGPIO.output(front_light,True)\nfor i in range(len(side_red)):\n    GPIO.setup(side_red[i],GPIO.OUT)\n    GPIO.output(side_red[i],True)\nfor i in range(len(side_green)):\n    GPIO.setup(side_green[i],GPIO.OUT)\n    GPIO.output(side_green[i],True)\ntime.sleep(sleep_long)\nGPIO.output(tower_top,False)\nGPIO.output(bi_led_red,False)\nGPIO.output(bi_led_green,False)\nGPIO.output(front_light,False)\nfor i in range(len(side_red)):\n    GPIO.output(side_red[i],False)\nfor i in range(len(side_green)):\n    GPIO.output(side_green[i],False)\n#### END SETUP ###############\n\ndef prob_rnd(range_low,range_hi,probs_factor):\n    internal_range=randint(probs_factor, range_hi);\n    final_number=randint(range_low, internal_range);\n    #print(final_number)\n    return final_number\n\ndef tower():\n    while keep_executing==True:\n        loop_factor=prob_rnd(3, 11, 8)\n        loop_mode=prob_rnd(1, 3, 1) #1 random #2 dec #3 inc\n        sleep_factor=prob_rnd(0, 8, 5) #0,8,5\n        #loop_mode=3\n        if loop_mode == 1:\n            #print(\"loop mode=rnd\")\n            for i in range(loop_factor):\n                rand=randint(i, loop_factor)\n                GPIO.output(tower_top,True)\n                time.sleep(sleep_micro*rand)\n                rand=randint(i, loop_factor)\n                GPIO.output(tower_top,False)\n                time.sleep(sleep_micro*rand)\n        if loop_mode == 2:\n            #print(\"loop mode=dec\")\n            for i in range(loop_factor*2):\n                GPIO.output(tower_top,True)\n                time.sleep(sleep_micro*i)\n                GPIO.output(tower_top,False)\n                time.sleep(sleep_micro*i)\n                #print(sleep_micro*i)\n        if loop_mode == 3:\n            #print(\"loop mode=inc\")\n            for i in range(loop_factor*3):\n                GPIO.output(tower_top,True)\n                time.sleep(sleep_micro)\n                GPIO.output(tower_top,False)\n                time.sleep(sleep_micro*(loop_factor*3-i))\n                #print(sleep_micro*(loop_factor*3-i))\n        #sleep_factor=randint(0, 8)\n        time.sleep(sleep_long*sleep_factor)\n        pass\n    return\n\n\ndef sides_multiple():\n    flicker_ratio=3\n    while keep_executing==True:\n        actual_side=side_green\n        for i in range(len(actual_side)-1):\n            for k in range(flicker_ratio):\n                GPIO.output(actual_side[i],True)\n                time.sleep(sleep_shorter)\n                GPIO.output(actual_side[i],False)\n                time.sleep(sleep_short)\n        GPIO.output(side_red[len(side_red)-1],True)\n        time.sleep(sleep_micro)\n        GPIO.output(side_green[len(side_green)-1],False)\n        time.sleep(sleep_long)\n\n        actual_side=side_red\n        for i in range(len(actual_side)-1):\n            for k in range(flicker_ratio):\n                GPIO.output(actual_side[i],True)\n                time.sleep(sleep_shorter)\n                GPIO.output(actual_side[i],False)\n                time.sleep(sleep_short)\n        GPIO.output(side_green[len(side_green)-1],True)\n        time.sleep(sleep_micro)\n        GPIO.output(side_red[len(side_red)-1],False)\n        time.sleep(sleep_long)\n        pass\n    return\n\n\ndef bicolor():\n    while keep_executing==True:\n        GPIO.output(bi_led_green,False)\n        GPIO.output(bi_led_red,True)\n        time.sleep(sleep_long)\n        GPIO.output(bi_led_red,False)\n        GPIO.output(bi_led_green,True)\n        time.sleep(sleep_long)\n    pass\n    return\n\ndef frontal_semi_broken():\n    sleep_factor=4\n    while keep_executing==True:\n        GPIO.output(front_light,True)\n        for i in range(sleep_factor):\n            time.sleep(sleep_long)\n        ## LIGHT SOMETIMES FLICKERS AS IT'S SEMI-BROKEN ##\n        glitch_factor=prob_rnd(0, 5, 2)\n        for k in range(glitch_factor):\n            GPIO.output(front_light,False)\n            #for y in range(1,randint(2, 3)):\n            y=randint(1, 2)\n            time.sleep(y*sleep_micro)\n            
GPIO.output(front_light,True)\n #for y in range(1,randint(2, 3)):\n time.sleep(y*sleep_micro)\n\n GPIO.output(front_light,False)\n time.sleep(sleep_long*sleep_factor)\n pass\n return\n\ndef frontal_pwm():\n sleep_factor=4\n pwm = GPIO.PWM(front_light, 40)\n while keep_executing==True:\n pwm.start(50)\n pwm.ChangeDutyCycle(randint(1, 70))\n pwm.ChangeFrequency(randint(1, 70))\n time.sleep(randint(1,sleep_factor))\n pwm.stop()\n time.sleep(randint(1,sleep_factor))\n pass\n return\n\ndef frontal_pwm_dim():\n sleep_factor=4\n dim_max=100\n dim_min=2\n pwm = GPIO.PWM(front_light, 100)\n pwm.start(dim_min)\n while keep_executing==True:\n #pwm.ChangeFrequency(100)\n for i in range(dim_min,dim_max,2):\n pwm.ChangeDutyCycle(i)\n time.sleep(sleep_micro)\n #pwm.ChangeFrequency(randint(5, 30))\n time.sleep(sleep_medium)\n for i in range(dim_max,dim_min,-1):\n pwm.ChangeDutyCycle(i)\n time.sleep(sleep_micro)\n time.sleep(sleep_medium)\n pass\n pwm.stop()\n return\n\n## ASYNC EXECUTE ##\nt_sides = threading.Thread(target=sides_multiple)\nt_tower = threading.Thread(target=tower)\nt_bicolor = threading.Thread(target=bicolor)\nt_front = threading.Thread(target=frontal_pwm_dim)\nt_front.start()\nt_bicolor.start()\ntime.sleep(sleep_longer)\nt_sides.start()\ntime.sleep(sleep_maximum)\nt_tower.start()\n\nimport tty\nimport sys\nimport termios\n\nprint(\"ANY KEY TO QUIT...\")\norig_settings = termios.tcgetattr(sys.stdin)\n\ntty.setraw(sys.stdin)\nx = 0\nwhile x != chr(27): # ESC\n x=sys.stdin.read(1)[0]\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, orig_settings)\n keep_executing=False\n print(\"EXIT\")\n\n sys.exit()\n GPIO.cleanup()","sub_path":"led_async.py","file_name":"led_async.py","file_ext":"py","file_size_in_byte":5668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"528825787","text":"import platform\nimport re\nimport socket\nimport subprocess\nimport sys\n\nValidIpAddressRegex = \"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$\";\n\ndef ping(hostname, count):\n systeme = platform.system();\n option = 'c'\n if systeme == 'Windows':\n option = 'n'\n if not (re.match(ValidIpAddressRegex, hostname)):\n print(\"Invalid IP Address !\")\n exit(1)\n else:\n options = '-' + option + ' ' + count\n status_Ping = subprocess.Popen([\"ping\", options, hostname], stdout=subprocess.PIPE)\n out = status_Ping.communicate()[0]\n rtt_line = subprocess.check_output([\"grep\", \"time=\"], input=out)\n rtt_temp = subprocess.check_output([\"cut\", \"-d\", \"=\", \"-f4\"], input=rtt_line)\n rtt = subprocess.check_output([\"cut\", \"-d\", \" \", \"-f1\"], input=rtt_temp)\n rtt = rtt.decode('utf8')\n rtt = float(str(rtt))\n status = status_Ping.returncode\n return status, rtt\n\nif __name__ == '__main__':\n try:\n hostname = socket.gethostbyname(sys.argv[1])\n count = \"1\"\n if len(sys.argv) == 3:\n count = sys.argv[2]\n status, rtt = ping(hostname, count)\n except: \n socket.error\n print(\"Hostname not resolved : \" + hostname)\n","sub_path":"Client/Ping.py","file_name":"Ping.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"541589431","text":"#!/usr/bin/python\n#coding:utf-8\n\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef getHTMLText(url):\n try:\n r = requests.get(url, timeout=30)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n demo = r.text\n soup = BeautifulSoup(demo, 
\"html.parser\")\n print(soup.prettify())\n\n except:\n return \"产生异常\"\n\ndef main():\n url = \"https://item.jd.com/5544014.html\"\n print(getHTMLText(url))\n\nif __name__ == '__main__':\n main()","sub_path":"Spider/requests_define_bs4.py","file_name":"requests_define_bs4.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"544772327","text":"from django.forms import fields\nfrom nepcore.forms.widgets import *\n\n# This maps our dataTypes to django form fields\nCUSTOM_FIELD_MAP = {\n\t\"str\": fields.CharField,\n\t\"int\": fields.IntegerField,\n\t\"dat\": fields.DateField,\n\t\"tim\": fields.TimeField\n}","sub_path":"nepcore/forms/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"389683704","text":"##########################################\n### Test\n### class\n### Marcus Blaisdell\n##########################################\n\nimport random\nimport time\nfrom functions import sign\n\nclass pr_Class():\n ### global variables:\n trainData = []\n validationData = []\n testData = []\n T = 1 # default value of T\n k = 1 # default value of k\n kmer = 11 # default k-mer size is 11\n #Tlist = [5, 10, 100, 1000, 5000, 10000]\n Tlist = [20]\n #klist = [1, 5, 10]\n klist = [1]\n w = {} # weight is a dictionary\n eta = 1.0 # eta is learning rate\n trainMistakes = 0\n trainTotal = 0\n testMistakes = 0\n testTotal = 0\n validationMistakes = 0\n validationTotal = 0\n trainPrecision = 0.0\n trainRecall = 0.0\n trainnpr = 0 # numerator for trainPrecision and trainRecall\n traindp = 0 # denominator for trainPrecision\n trainF1 = 0.0\n validationPrecision = 0.0\n validationRecall = 0.0\n validationnpr = 0 # numerator for precision and recall\n validationdp = 0 # denominator for Precision\n validationF1 = 0.0\n testPrecision = 0.0\n testRecall = 0.0\n testnpr = 0 # numerator for precision and recall\n testdp = 0 # denominator for Precision\n testF1 = 0.0\n trainGood = 0\n validationGood = 0\n testGood = 0\n trainAccuracy = 0.0\n validationAccuracy = 0.0\n testAccuracy = 0.0\n\n def __init__(self):\n pass\n\n ### perceptron function:\n\n #def perceptron(self, trainDataSet, testDataSet, outFile):\n def perceptron(self, trainDataSet):\n #self.trainTotal = len(trainDataSet)\n\n xit = [] # x-sub-i-sub-t, the training vector for the current iterations\n yit = 0 # y-sub-i-sub-t, the label for the current training vector (yStar)\n yHat = 0\n\n for l in range (len(trainDataSet)):\n self.trainTotal += 1\n xit = trainDataSet[l][0]\n yit = trainDataSet[l][1]\n\n ### make the prediction: yHat = y*()\n #yHat = yit * self.dotProd(xit) # Method U\n #yHat = yit * (self.dotProd(xit) + self.b) # Method V\n #yHat = sign(yit * (self.dotProd(xit) + self.b)) # Method W\n yHat = sign(yit * self.dotProd(xit)) # Method X\n\n #print 'initial eval: ', yit, ' : ', yHat\n\n ### update weight accordingly,\n ### if predicted value and actual value don't match,\n ## update weight,\n ### if they do match no update required\n if yit == 1:\n self.trainGood += 1\n if yHat > 0:\n self.trainnpr += 1\n self.traindp += 1\n else:\n #print yit, ' : ', yHat\n self.updateWeight(xit, yit)\n self.trainMistakes += 1\n else:\n if yHat > 0:\n #print yit, ' : ', yHat\n self.trainMistakes += 1\n self.updateWeight(xit, yit)\n else:\n self.traindp += 1\n\n ### end train loop, trains on each sample in trainData\n\n ### end iteration 
loop\n\n ### end perceptron function\n\n ### use new weight to test accuracy on test dataList\n\n #def testWeight (self, trainDataSet, testDataSet, outFile, t):\n def testWeight (self, testDataSet, t):\n # reset mistakes count so each iteration starts at 0\n\n xit = [] # x-sub-i-sub-t, the training vector for the current iterations\n yit = 0 # y-sub-i-sub-t, the label for the current training vector (yStar)\n yHat = 0\n\n ### evaluate all test samples:\n\n for i in range(len(testDataSet)):\n self.testTotal += 1\n xit = testDataSet[i][0]\n yit = testDataSet[i][1]\n\n ### make the prediction:\n #yHat = yit * self.dotProd(xit) # Method A\n #yHat = yit * (self.dotProd(xit) + self.b) # Method B\n #yHat = sign(yit * (self.dotProd(xit) + self.b)) # Method C\n yHat = sign(yit * self.dotProd(xit)) # Method D\n\n ### if the value is actually good,\n ### and we predicted good, increment npr\n ### which is the # lines predicted as good that are actually good\n if yit == 1:\n self.testGood += 1\n if yHat > 0:\n self.testnpr += 1\n self.testdp += 1\n else:\n self.testMistakes += 1\n if yit == -1:\n if yHat > 0:\n self.testMistakes += 1\n\n ### end testWeight function\n\n ### function dotProd(), dot product by index\n def dotProd(self, xArray):\n #startTime = time.time()\n result = 0.0\n\n for element in xArray:\n if self.w.get(element[0], '--') == '--':\n self.w[element[0]] = 0\n else:\n result += self.w[element[0]] * element[1]\n\n #endTime = time.time()\n #print 'dotProd2: ', endTime - startTime\n\n return result\n\n ### end dotProd() function\n\n ### updateWeight function:\n def updateWeight(self, xarray, yStar):\n #startTime = time.time()\n\n ### update the weights in the weight vector that are in xit\n for element in xarray:\n self.w[element[0]] += self.eta * yStar * element[1]\n\n #endTime = time.time()\n #print 'updateWeight: ', endTime - startTime\n\n ### end updateWeight function\n","sub_path":"Ranking/pr_Class.py","file_name":"pr_Class.py","file_ext":"py","file_size_in_byte":5452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"146325361","text":"# Задача 0\r\ndef len_of_words(new_string=None, separator=None):\r\n if not new_string:\r\n new_string = input('Type your text: ')\r\n if not separator:\r\n separator = input('Type separator: ')\r\n # Условия введены, что бы проще было использовать проверяющую функцию (этакий кастыль).\r\n list_of_words = new_string.split(separator)\r\n for i, j in enumerate(list_of_words):\r\n list_of_words[i] = str(len(j)) + j\r\n return list_of_words\r\n\r\n\r\nprint(len_of_words())\r\n\r\n\r\ndef test_func():\r\n if len_of_words('asd d ddd fef', ' ') == ['3asd', '1d', '3ddd', '3fef']:\r\n print('All right!')\r\n else:\r\n print('You have a mistake in your function!')\r\n\r\n\r\ntest_func()","sub_path":"Lesson_6/#0.py","file_name":"#0.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"319216271","text":"#!/usr/bin/env python3\n\"\"\"\nredis.\n\"\"\"\nfrom functools import wraps\nimport redis\nfrom typing import Union, Callable, Optional, Any\nimport uuid\n\n\ndef count_calls(method: Callable) -> Callable:\n \"\"\"Incrementing values\"\"\"\n @wraps(method)\n def wrapper(self, *args, **kwargs):\n \"\"\"wrapper func\"\"\"\n self._redis.incr(method.__qualname__)\n return method(self, *args, **kwargs)\n return wrapper\n\n\ndef call_history(method: Callable) -> Callable:\n \"\"\"Storing lists\"\"\"\n @wraps(method)\n 
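    # descriptive note: the wrapper below logs every call by pushing str(args) onto the
    # Redis list '<qualname>:inputs' and the stringified return value onto
    # '<qualname>:outputs', the same two lists that replay() later zips back together;
    # keyword arguments are not captured because the signature only takes *args.
    # Hypothetical example (not in the original): after Cache().store('x'), the list
    # 'Cache.store:inputs' holds the string ('x',) and 'Cache.store:outputs' holds the returned uuid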
def wrapper(self, *args):\n \"\"\"wrapper func\"\"\"\n self._redis.rpush(f\"{method.__qualname__}:inputs\", str(args))\n output = method(self, *args)\n self._redis.rpush(f\"{method.__qualname__}:outputs\", str(output))\n return output\n return wrapper\n\n\ndef replay(fn: Callable) -> str:\n \"\"\"Retrieving lists\"\"\"\n method = fn.__qualname__\n inputs = f\"{method}:inputs\"\n outputs = f\"{method}:outputs\"\n inp_list = fn.__self__._redis.lrange(inputs, 0, -1)\n out_list = fn.__self__._redis.lrange(outputs, 0, -1)\n Q = fn.__self__._redis.get(method).decode('utf-8')\n print(f\"{method} was called {Q} times:\")\n for inp, out in zip(inp_list, out_list):\n print(f\"{method}(*{inp.decode('utf-8')}) -> {out.decode('utf-8')}\")\n\n\nclass Cache:\n \"\"\"Cache Class.\"\"\"\n\n def __init__(self):\n \"\"\"__Init__.\"\"\"\n self._redis = redis.Redis()\n self._redis.flushdb()\n\n @call_history\n @count_calls\n def store(self, data: Union[str, bytes, int, float]) -> str:\n \"\"\"Takes a data argument and returns a string.\"\"\"\n uid = str(uuid.uuid4())\n self._redis.set(uid, data)\n return uid\n\n def get(self, data: str, fn: Optional[Callable] = None) ->\\\n Union[str, bytes, int, float]:\n \"\"\"take a key string argument and\n an optional Callable argument named fn.\"\"\"\n if data:\n res = self._redis.get(data)\n if fn:\n return fn(res)\n else:\n return res\n\n def get_str(self, data: bytes) -> str:\n \"\"\"Automatically parametrize Cache.get with\n the correct conversion function.\"\"\"\n return data.decode(\"utf-8\")\n\n def get_int(self, data: bytes) -> int:\n \"\"\"Automatically parametrize Cache.get with\n the correct conversion function.\"\"\"\n return int(data)\n","sub_path":"0x0B_redis_basic/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"64187952","text":"import os\n\n\ndef delete_unneeded(dir, size_limit):\n for root, dirs, files in os.walk(dir):\n for file in files:\n path = os.path.join(root, file)\n if os.path.getsize(path) > size_limit:\n print(path)\n\n\ndelete_unneeded('.\\\\chapter08\\\\', 5000)\n","sub_path":"chapter09/deleteunneeded.py","file_name":"deleteunneeded.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"253073369","text":"import re\n\nimport pandas as pd\nimport torch\nimport torch.distributed as distrib\nfrom nltk.tokenize import sent_tokenize\nfrom torch.utils.data import DataLoader, Dataset, DistributedSampler\nfrom unidecode import unidecode\n\nimport pysrc.review.config as cfg\nfrom pysrc.review.utils import ids_to_sent\n\n\ndef load_pubmedtop50(parts):\n return [\n pd.read_csv(f\"{cfg.data_path}/pubmedtop50_{part}.csv\", index_col='id')\n for part in parts\n ]\n\n\ndef preprocess_paper(text, max_len, tokenizer):\n sents = [[tokenizer.artBOS.tkn] + tokenizer.tokenize(sent) + [tokenizer.artEOS.tkn]\n for sent in sent_tokenize(text)]\n ids, segments, segment_signature = [], [], 0\n n_setns = 0\n for s in sents:\n if len(ids) + len(s) <= max_len:\n n_setns += 1\n ids.extend(tokenizer.convert_tokens_to_ids(s))\n segments.extend([segment_signature] * len(s))\n segment_signature = (segment_signature + 1) % 2\n else:\n break\n mask = [1] * len(ids)\n\n pad_len = max(0, max_len - len(ids))\n ids += [tokenizer.PAD.idx] * pad_len\n mask += [0] * pad_len\n segments += [segment_signature] * pad_len\n\n return ids, mask, segments, 
n_setns\n\n\ndef standardize(text):\n \"\"\" Standardize text span\n \"\"\"\n\n text = unidecode(text)\n text = text.replace('--', '-')\n text = text.replace(';', \".\")\n text = text.replace('...', \".\")\n text = text.replace('..', \".\")\n text = text.replace(\"'''\", \"'\")\n text = text.replace(\"''\", \"'\")\n text = text.replace(\"```\", \"`\")\n text = text.replace(\"``\", \"`\")\n text = text.strip()\n text = re.sub(r'\\s([?.!\"](?:\\s|$))', r'\\1', text)\n text = re.sub(r'\\([^)]*\\)', '', text)\n\n return text\n\n\nclass TrainDataset(Dataset):\n \"\"\" Custom Train Dataset\n \"\"\"\n\n def __init__(self, dataframe, tokenizer, article_len):\n self.df = dataframe\n self.n_examples = len(dataframe)\n self.tokenizer = tokenizer\n self.article_len = article_len\n\n def __getitem__(self, idx):\n\n ex = self.df.iloc[idx]\n paper = standardize(ex.paper_top50)\n # abstract = standardize(ex.abstract)\n try:\n gold_ids = [int(e) for e in ex.gold_ids_top6.strip(\"[]\").split(',')]\n except Exception:\n gold_ids = []\n\n article_ids, article_mask, article_segment, n_setns = \\\n preprocess_paper(paper, self.article_len, self.tokenizer)\n\n # form target\n target = [(1 if i in gold_ids else 0) for i in range(n_setns)]\n\n return article_ids, article_mask, article_segment, target\n\n def __len__(self):\n return self.n_examples\n\n\nclass EvalDataset(Dataset):\n \"\"\" Custom Valid/Test Dataset\n \"\"\"\n\n def __init__(self, dataframe, tokenizer, article_len):\n self.df = dataframe\n self.n_examples = len(dataframe)\n self.tokenizer = tokenizer\n self.article_len = article_len\n\n def __getitem__(self, idx):\n ex = self.df.iloc[idx]\n paper = standardize(ex.paper_top50)\n abstract = standardize(ex.abstract)\n\n gold_ids = [int(e) for e in ex.gold_ids_top6.strip(\"[]\").split(',')]\n gold_sents = self.extract_gold_sents(paper, gold_ids)\n\n article_ids, article_mask, article_segment, n_setns = \\\n preprocess_paper(paper, self.article_len, self.tokenizer)\n\n # cut gold ids\n gold_ids = [e for e in gold_ids if e < n_setns]\n gold_text = ' '.join(gold_sents[:len(gold_ids)])\n if not gold_text:\n gold_text = ' '.join(gold_sents)\n\n return paper, article_ids, article_mask, article_segment, gold_text, abstract\n\n @staticmethod\n def extract_gold_sents(paper, gold_ids):\n paper = sent_tokenize(paper)\n gold_sents = [sent for i, sent in enumerate(paper) if i in gold_ids]\n return gold_sents\n\n def __len__(self):\n return self.n_examples\n\n\ndef train_collate_fn(batch_data):\n \"\"\" Function to pull batch for train\n\n :param batch_data: list of `TrainDataset` Examples\n :return:\n one batch of data\n \"\"\"\n data0, data1, data2, data3 = list(zip(*batch_data))\n\n return torch.tensor(data0, dtype=torch.long), \\\n torch.tensor(data1, dtype=torch.long), \\\n torch.tensor(data2, dtype=torch.long), \\\n [torch.tensor(e, dtype=torch.float) for e in data3]\n\n\ndef eval_collate_fn(batch_data):\n \"\"\" Function to pull batch for valid/test\n\n :param batch_data: list of `EvalDataset` Examples\n :return:\n one batch of data\n \"\"\"\n\n return [torch.tensor(data_prt, dtype=torch.long) if not isinstance(data_prt[0], str)\n else data_prt for data_prt in list(zip(*batch_data))]\n\n\ndef load_data(dataset_type, parts):\n assert dataset_type in ['pubmed']\n return load_pubmedtop50(parts)\n\n\ndef create_ddp_loader(dataset, batch_size, collate_fn):\n return DataLoader(\n dataset=dataset, batch_size=batch_size,\n sampler=DistributedSampler(\n dataset=dataset, num_replicas=distrib.get_world_size(), 
rank=distrib.get_rank()\n ),\n num_workers=cfg.num_workers, shuffle=False, pin_memory=True, collate_fn=collate_fn,\n )\n\n\ndef create_loader(dataset, batch_size, collate_fn):\n return DataLoader(\n dataset=dataset, batch_size=batch_size, shuffle=False,\n pin_memory=True, collate_fn=collate_fn, num_workers=cfg.num_workers\n )\n\n\nif __name__ == \"__main__\":\n \"\"\"\n some tests\n \"\"\"\n\n data_sz = 'small'\n btch_sz = 1\n dstype = 'cnndm'\n data_parts = load_data(dstype, data_sz)\n\n train_dl, valid_dl, test_dl = [\n create_loader(data_part, btch_sz, collate_fn) for data_part, collate_fn\n in zip(data_parts, [train_collate_fn, eval_collate_fn, eval_collate_fn])\n ]\n\n print(len(train_dl.dataset))\n print(len(valid_dl.dataset))\n print(len(test_dl.dataset))\n\n for btch in train_dl:\n art_ids = btch[0]\n sum_ids = btch[3]\n\n print(ids_to_sent(art_ids[0]))\n print('----------------')\n print(ids_to_sent(sum_ids[0]))\n break\n","sub_path":"pysrc/review/train/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"59441669","text":"from flask import Flask, jsonify, abort, make_response\nfrom flask_restful import Api, Resource, reqparse, fields, marshal\nimport pymongo\n\napp = Flask(__name__, static_url_path=\"\")\napi = Api(app)\n\ntasks = []\n\nconnection = pymongo.MongoClient('35.175.190.82', 27017)\n\ndatabase = connection['mydb_1']\n\ncollection = database['mycol_1']\n\nfor i in collection.find({}):\n tasks.append(i)\n\ntask_fields = {\n 'id' : fields.Integer,\n 'title': fields.String,\n 'description': fields.String,\n 'done': fields.Boolean\n\n}\n\n\nclass HealthCheck(Resource):\n def get(self):\n return 200\n\n\n\n\nclass TaskListAPI(Resource):\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('title', type=str, required=True,\n help='No task title provided',\n location='json')\n self.reqparse.add_argument('description', type=str, default=\"\",\n location='json')\n super(TaskListAPI, self).__init__()\n\n def get(self):\n return {'tasks': [marshal(task, task_fields) for task in collection.find({})]}\n\n def post(self):\n args = self.reqparse.parse_args()\n task = {\n 'id': tasks[-1]['id'] + 1 if len(tasks) > 0 else 1,\n 'title': args['title'],\n 'description': args['description'],\n 'done': False\n }\n tasks.append(task)\n collection.insert_one(task)\n return {'task': marshal(task, task_fields)}, 201\n\n\nclass TaskAPI(Resource):\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('title', type=str, location='json')\n self.reqparse.add_argument('description', type=str, location='json')\n self.reqparse.add_argument('done', type=bool, location='json')\n super(TaskAPI, self).__init__()\n\n def get(self, id):\n task = [task for task in collection.find({}) if task['id'] == id]\n if len(task) == 0:\n abort(404)\n return {'task': marshal(task[0], task_fields)}\n\n def put(self, id):\n task = [task for task in collection.find({}) if task['id'] == id]\n if len(task) == 0:\n abort(404)\n task = task[0]\n args = self.reqparse.parse_args()\n for k, v in args.items():\n if v is not None:\n task[k] = v\n collection.update({'id' : id},{'$set' : {k : v}})\n return {'task': marshal(task, task_fields)}\n\n def delete(self, id):\n task = [task for task in collection.find({}) if task['id'] == id]\n if len(task) == 0:\n abort(404)\n tasks.remove(task[0])\n collection.remove({'id':id})\n return 
{'result': True}\n\n\napi.add_resource(TaskListAPI, '/tasks', endpoint='tasks')\napi.add_resource(TaskAPI, '/tasks/', endpoint='task')\napi.add_resource(HealthCheck, '/healthcheck/', endpoint='healthcheck')\n\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"71503900","text":"def return_subtitle_extent(fname, playlist = -1, subtitle = 0):\n # Import modules ...\n import re\n import subprocess\n\n # Load sub-functions ...\n from .return_media_duration import return_media_duration\n from .return_video_frame_rate import return_video_frame_rate\n from .return_video_height import return_video_height\n from .return_video_width import return_video_width\n\n # Check input ...\n if fname.startswith(\"bluray:\") and playlist < 0:\n raise Exception(\"a Blu-ray was specified but no playlist was supplied\")\n\n # Find out information about video ...\n duration = return_media_duration(fname, playlist = playlist) # [s]\n fps = return_video_frame_rate(fname, playlist = playlist) # [Hz]\n height = return_video_height(fname, playlist = playlist) # [px]\n width = return_video_width(fname, playlist = playlist) # [px]\n\n # Find stream info ...\n if fname.startswith(\"bluray:\"):\n proc = subprocess.Popen(\n [\n \"ffmpeg\",\n \"-hide_banner\",\n \"-f\", \"lavfi\",\n \"-i\", \"color=color=black:size={0:d}x{1:d}:rate={2:f}:duration={3:f},format=yuv420p\".format(width, height, fps, duration),\n \"-probesize\", \"3G\",\n \"-analyzeduration\", \"1800M\",\n \"-playlist\", \"{0:d}\".format(playlist),\n \"-i\", fname,\n \"-filter_complex\", \"[0:v:0][1:s:{0:d}]overlay,cropdetect\".format(subtitle),\n \"-an\",\n \"-sn\",\n \"-vn\",\n \"-y\",\n \"-f\", \"null\",\n \"/dev/null\"\n ],\n encoding = \"utf-8\",\n stderr = subprocess.PIPE,\n stdout = subprocess.PIPE\n )\n stdout, stderr = proc.communicate()\n if proc.returncode != 0:\n raise Exception(\"\\\"ffmpeg\\\" command failed\")\n else:\n proc = subprocess.Popen(\n [\n \"ffmpeg\",\n \"-hide_banner\",\n \"-f\", \"lavfi\",\n \"-i\", \"color=color=black:size={0:d}x{1:d}:rate={2:f}:duration={3:f},format=yuv420p\".format(width, height, fps, duration),\n \"-probesize\", \"3G\",\n \"-analyzeduration\", \"1800M\",\n \"-i\", fname,\n \"-filter_complex\", \"[0:v:0][1:s:{0:d}]overlay,cropdetect\".format(subtitle),\n \"-an\",\n \"-sn\",\n \"-vn\",\n \"-y\",\n \"-f\", \"null\",\n \"/dev/null\"\n ],\n encoding = \"utf-8\",\n stderr = subprocess.PIPE,\n stdout = subprocess.PIPE\n )\n stdout, stderr = proc.communicate()\n if proc.returncode != 0:\n # HACK: Fallback and attempt to load it as a raw M-JPEG stream.\n proc = subprocess.Popen(\n [\n \"ffmpeg\",\n \"-hide_banner\",\n \"-f\", \"lavfi\",\n \"-i\", \"color=color=black:size={0:d}x{1:d}:rate={2:f}:duration={3:f},format=yuv420p\".format(width, height, fps, duration),\n \"-probesize\", \"3G\",\n \"-analyzeduration\", \"1800M\",\n \"-f\", \"mjpeg\",\n \"-i\", fname,\n \"-filter_complex\", \"[0:v:0][1:s:{0:d}]overlay,cropdetect\".format(subtitle),\n \"-an\",\n \"-sn\",\n \"-vn\",\n \"-y\",\n \"-f\", \"null\",\n \"/dev/null\"\n ],\n encoding = \"utf-8\",\n stderr = subprocess.PIPE,\n stdout = subprocess.PIPE\n )\n stdout, stderr = proc.communicate()\n if proc.returncode != 0:\n raise Exception(\"\\\"ffmpeg\\\" command failed\")\n\n # Initialize values ...\n y1 = height # [px]\n y2 = 0 # [px]\n\n # Loop over matches ...\n for 
match in re.findall(r\"crop=[0-9]+:[0-9]+:[0-9]+:[0-9]+\", stderr):\n # Extract information ...\n h = int(match.split(\"=\")[1].split(\":\")[1]) # [px]\n y = int(match.split(\"=\")[1].split(\":\")[3]) # [px]\n\n # Update values ...\n y1 = min(y1, y) # [px]\n y2 = max(y2, y + h) # [px]\n\n # Return answer ...\n return y1, y2\n","sub_path":"return_subtitle_extent.py","file_name":"return_subtitle_extent.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"13644930","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^title/$',views.title,name='title'),\n url(r'^map/$',views.map,name='map'),\n url(r'^opening/$' ,views.opening,name='opening'),\n #stage1-1\n url(r'^stage1-1/$',views.stage1_1,name='stage1-1'),\n url(r'^stage1-1/story1-1/$',views.story1_1,name='story1-1'),\n url(r'^stage1-1/game1-1/$',views.game1_1,name='game1-1'),\n url(r'^stage1-1/head1-1/$',views.head1_1,name='head1-1'),\n url(r'^stage1-1/nextstory1-1/$',views.nextstory1_1,name='nextstory1-1'),\n url(r'^omake/$',views.omake,name='omake'),\n #stage1-2\n url(r'^stage1-2/$',views.stage1_2,name='stage1-2'),\n url(r'^stage1-2/story1-2/$',views.story1_2,name='story1-2'),\n url(r'^stage1-2/game1-2/$',views.game1_2,name='game1-2'),\n url(r'^stage1-2/head1-2/$',views.head1_2,name='head1-2'),\n url(r'^stage1-2/nextstory1-2/$',views.nextstory1_2,name='nextstory1-2'),\n #stage1-3\n url(r'^stage1-3/$',views.stage1_3,name='stage1-3'),\n url(r'^stage1-3/story1-3/$',views.story1_3,name='story1-3'),\n url(r'^stage1-3/game1-3/$',views.game1_3,name='game1-3'),\n url(r'^stage1-3/head1-3/$',views.head1_3,name='head1-3'),\n url(r'^stage1-3/nextstory1-3/$',views.nextstory1_3,name='nextstory1-3'),\n #stage1-4\n url(r'^stage1-4/$',views.stage1_4,name='stage1-4'),\n url(r'^stage1-4/story1-4/$',views.story1_4,name='story1-4'),\n url(r'^stage1-4/game1-4/$',views.game1_4,name='game1-4'),\n url(r'^stage1-4/head1-4/$',views.head1_4,name='head1-4'),\n url(r'^stage1-4/nextstory1-4/$',views.nextstory1_4,name='nextstory1-4'),\n #stage2-1\n url(r'^stage2-1/$',views.stage2_1,name='stage2-1'),\n url(r'^stage2-1/story2-1/$',views.story2_1,name='story2-1'),\n url(r'^stage2-1/game2-1/$',views.game2_1,name='game2-1'),\n url(r'^stage2-1/head2-1/$',views.head2_1,name='head2-1'),\n url(r'^stage2-1/nextstory2-1/$',views.nextstory2_1,name='nextstory2-1'),\n url(r'^stage2-1/gameover2-1/$',views.gameover2_1,name='gameover2-1'),\n #stage2-2\n url(r'^stage2-2/$',views.stage2_2,name='stage2-2'),\n url(r'^stage2-2/story2-2/$',views.story2_2,name='story2-2'),\n url(r'^stage2-2/game2-2/$',views.game2_2,name='game2-2'),\n url(r'^stage2-2/head2-2/$',views.head2_2,name='head2-2'),\n url(r'^stage2-2/nextstory2-2/$',views.nextstory2_2,name='nextstory2-2'),\n url(r'^stage2-2/gameover2-2/$',views.gameover2_2,name='gameover2-2'),\n #stage2-3\n url(r'^stage2-3/$',views.stage2_3,name='stage2-3'),\n url(r'^stage2-3/story2-3/$',views.story2_3,name='story2-3'),\n url(r'^stage2-3/game2-3/$',views.game2_3,name='game2-3'),\n url(r'^stage2-3/head2-3/$',views.head2_3,name='head2-3'),\n url(r'^stage2-3/nextstory2-3/$',views.nextstory2_3,name='nextstory2-3'),\n url(r'^stage2-3/gameover2-3/$',views.gameover2_3,name='gameover2-3'),\n #stage3-1\n url(r'^stage3-1/$',views.stage3_1,name='stage3-1'),\n url(r'^stage3-1/story3-1/$',views.story3_1,name='story3-1'),\n url(r'^stage3-1/game3-1/$',views.game3_1,name='game3-1'),\n 
url(r'^stage3-1/head3-1/$',views.head3_1,name='head3-1'),\n url(r'^stage3-1/nextstory3-1/$',views.nextstory3_1,name='nextstory3-1'),\n url(r'^stage3-1/gameover3-1/$',views.gameover3_1,name='gameover3-1'),\n #stage3-2\n url(r'^stage3-2/$',views.stage3_2,name='stage3-2'),\n url(r'^stage3-2/story3-2/$',views.story3_2,name='story3-2'),\n url(r'^stage3-2/game3-2/$',views.game3_2,name='game3-2'),\n url(r'^stage3-2/head3-2/$',views.head3_2,name='head3-2'),\n url(r'^stage3-2/nextstory3-2/$',views.nextstory3_2,name='nextstory3-2'),\n url(r'^stage3-2/gameover3-2/$',views.gameover3_2,name='gameover3-2'),\n #stage3-3\n url(r'^stage3-3/$',views.stage3_3,name='stage3-3'),\n url(r'^stage3-3/story3-3/$',views.story3_3,name='story3-3'),\n url(r'^stage3-3/game3-3/$',views.game3_3,name='game3-3'),\n url(r'^stage3-3/head3-3/$',views.head3_3,name='head3-3'),\n url(r'^stage3-3/nextstory3-3/$',views.nextstory3_3,name='nextstory3-3'),\n url(r'^stage3-3/gameover3-3/$',views.gameover3_3,name='gameover3-3'),\n #stage4-1\n url(r'^stage4-1/$',views.stage4_1,name='stage4-1'),\n url(r'^stage4-1/story4-1/$',views.story4_1,name='story4-1'),\n url(r'^stage4-1/game4-1/$',views.game4_1,name='game4-1'),\n url(r'^stage4-1/head4-1/$',views.head4_1,name='head4-1'),\n url(r'^stage4-1/nextstory4-1/$',views.nextstory4_1,name='nextstory4-1'),\n url(r'^stage4-1/see4-1/$',views.see4_1,name='see4-1'),\n url(r'^stage4-1/input4-1/$',views.input4_1,name='input4-1'),\n\n url(r'^stage4-2/$',views.stage4_2,name='stage4-2'),\n url(r'^stage4-2/story4-2/$',views.story4_2,name='story4-2'),\n url(r'^stage4-2/game4-2/$',views.game4_2,name='game4-2'),\n url(r'^stage4-2/head4-2/$',views.head4_2,name='head4-2'),\n url(r'^stage4-2/nextstory4-2/$',views.nextstory4_2,name='nextstory4-2'),\n url(r'^stage4-2/see4-2/$',views.see4_2,name='see4-2'),\n url(r'^stage4-2/see4-2-1/$',views.see4_2_1,name='see4-2-1'),\n url(r'^stage4-2/input4-2/$',views.input4_2,name='input4-2'),\n\n url(r'^stage4-3/$',views.stage4_3,name='stage4-3'),\n url(r'^stage4-3/story4-3/$',views.story4_3,name='story4-3'),\n url(r'^stage4-3/game4-3/$',views.game4_3,name='game4-3'),\n url(r'^stage4-3/head4-3/$',views.head4_3,name='head4-3'),\n url(r'^stage4-3/nextstory4-3/$',views.nextstory4_3,name='nextstory4-3'),\n url(r'^stage4-3/see4-3/$',views.see4_3,name='see4-3'),\n url(r'^stage4-3/input4-3/$',views.input4_3,name='input4-3'),\n\n url(r'^stage4-4/$',views.stage4_4,name='stage4-4'),\n url(r'^stage4-4/story4-4/$',views.story4_4,name='story4-4'),\n url(r'^stage4-4/game4-4/$',views.game4_4,name='game4-4'),\n url(r'^stage4-4/head4-4/$',views.head4_4,name='head4-4'),\n url(r'^stage4-4/nextstory4-4/$',views.nextstory4_4,name='nextstory4-4'),\n url(r'^stage4-4/see4-4/$',views.see4_4,name='see4-4'),\n url(r'^stage4-4/input4-4/$',views.input4_4,name='input4-4'),\n\n url(r'^stage4-5/$',views.stage4_5,name='stage4-5'),\n url(r'^stage4-5/story4-5/$',views.story4_5,name='story4-5'),\n url(r'^stage4-5/game4-5/$',views.game4_5,name='game4-5'),\n url(r'^stage4-5/head4-5/$',views.head4_5,name='head4-5'),\n url(r'^stage4-5/nextstory4-5/$',views.nextstory4_5,name='nextstory4-5'),\n url(r'^stage4-5/see4-5/$',views.see4_5,name='see4-5'),\n url(r'^stage4-5/input4-5/$',views.input4_5,name='input4-5'),\n\n url(r'^stage4-6/$',views.stage4_6,name='stage4-6'),\n url(r'^stage4-6/story4-6/$',views.story4_6,name='story4-6'),\n url(r'^stage4-6/game4-6/$',views.game4_6,name='game4-6'),\n url(r'^stage4-6/head4-6/$',views.head4_6,name='head4-6'),\n 
url(r'^stage4-6/nextstory4-6/$',views.nextstory4_6,name='nextstory4-6'),\n    url(r'^stage4-6/see4-6/$',views.see4_6,name='see4-6'),\n    url(r'^stage4-6/input4-6/$',views.input4_6,name='input4-6'),\n    url(r'^stage4-6/movie4-6/$',views.movie4_6,name='movie4-6'),\n\n    url(r'^boss/$',views.boss,name='boss'),\n    url(r'^boss/start-boss/$',views.start_boss,name='start_boss'),\n    url(r'^boss-second/$',views.boss_second,name='boss-second'),\n\n    url(r'^boss/movie-boss/$',views.movie_boss,name='movie-boss'),\n    url(r'^boss/story-boss/$',views.story_boss,name='story-boss'),\n    url(r'^boss/story-boss2/$',views.story_boss2,name='story-boss2'),\n    url(r'^boss/story-bosslast/$',views.story_bosslast,name='story-bosslast'),\n\n    url(r'^boss-second/story-boss3/$',views.story_boss3,name='story-boss3'),\n\n    url(r'^boss/head-boss/$',views.head_boss,name='head-boss'),\n    url(r'^boss-second/head-boss-second/$',views.head_boss_second,name='head-boss-second'),\n\n    url(r'^boss/game-boss/$',views.game_boss,name='game-boss'),\n\n    url(r'^boss2/$',views.boss2,name='boss2'),\n    url(r'^ending/$',views.ending,name='ending'),\n    url(r'^END/$',views.END,name='END')\n\n]\n","sub_path":"game/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":7790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"371971686","text":"# coding=utf-8\r\n# Definition for a binary tree node.\r\n# class TreeNode(object):\r\n#     def __init__(self, x):\r\n#         self.val = x\r\n#         self.left = None\r\n#         self.right = None\r\n\r\nclass Solution(object):\r\n    def countNodes(self, root):\r\n        \"\"\"\r\n        :type root: TreeNode\r\n        :rtype: int\r\n        \"\"\"\r\n        if not root:\r\n            return 0\r\n        left=self.heightL(root)+1\r\n        right=self.heightR(root)+1\r\n        if left==right:\r\n            return 2**(left)-1\r\n        else:\r\n            return 1+self.countNodes(root.left)+self.countNodes(root.right)\r\n    \r\n    def heightL(self, root):\r\n        count=0\r\n        while root.left:\r\n            count+=1\r\n            root=root.left\r\n        return count\r\n    \r\n    def heightR(self, root):\r\n        count=0\r\n        while root.right:\r\n            count+=1\r\n            root=root.right\r\n        return count\r\n","sub_path":"A-F/Count Complete Tree Nodes.py","file_name":"Count Complete Tree Nodes.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"330219786","text":"'''\n    Quick Sort (100 Marks)\nGiven an input array of integers, sort the whole array using Quick Sort.\n\nInput Format\nYou will be given an array of integers of size N. \n\nConstraints\n1 < N < 10^5\n1 < A[i] < 10^6\n\nOutput Format\nYou need to print sorted integer array elements separated by space. \n\nSample TestCase 1\nInput\n\n6\n6\n1\n6\n7\n3\n1\n\nOutput\n\n1\n1\n3\n6\n6\n7\n\n\n'''\n''' Read input from STDIN. 
Print your output to STDOUT '''\n #Use input() to read input from STDIN and use print to write your output to STDOUT\n\nfrom sys import stdout \ndef quick_sort(splist , first, last):\n\tif first < last :\n\t\tsplit_point = partition(splist,first,last)\n\n\t\tquick_sort(splist, first, split_point-1)\n\t\tquick_sort(splist, split_point + 1, last)\n\ndef partition(splist, first, last):\n\tpivot_value = splist[first]\n\tleft_mark = first + 1\n\tright_mark = last\n\tflag = True\n\n\twhile flag:\n\t\twhile left_mark<= right_mark and splist[left_mark]<= pivot_value:\n\t\t\tleft_mark += 1\n\n\t\twhile splist[right_mark]>= pivot_value and right_mark>=left_mark:\n\t\t\tright_mark -= 1\n\n\t\tif right_mark < left_mark:\n\t\t\tflag= False\n\t\telse:\n\t\t\tsplist[left_mark], splist[right_mark] = splist[right_mark],splist[left_mark]\n\n\tsplist[first],splist[right_mark] = splist[right_mark],splist[first]\n\n\treturn right_mark\n\n\ndef main():\n n = int(input().strip())\n splist = [int(input().strip()) for _ in range(n)]\n quick_sort(splist, 0 , n-1)\n\n stdout.write(\"\\n\".join(map(str,splist)))\n\nmain()","sub_path":"techgig_solutions/algorithms/sorting/quick_sort/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"33977763","text":"# import pymysql\n#\n# # 获取数据库连接\n# conn=pymysql.connect(user='root',password='root',\n# host='127.0.0.1',port=3306,charset='utf8',db='test1')\n# print(conn)\n#\n# # 定义sql语句\n# sql='select * from user where id=1'\n#\n# # 获取查询数据库对象游标\n# cursor=conn.cursor()\n# # 使用游标执行查询,获取查询结果集\n# res=cursor.execute(sql)\n# print(res)\n# # 解析结果集\n# datas=cursor.fetchmany(res)\n# print(datas)\n#\n\nfrom sqlalchemy import create_engine,select,Table,MetaData,Column,Integer,String\nfrom sqlalchemy import tuple_,delete,update,insert\n# 使用sqlalchemy去连接数据库\n# 获取连接引擎地址\nengine=create_engine('mysql+pymysql://root:root@127.0.0.1:3306/test1')\nprint(engine)\n# 获取连接对象\nconn=engine.connect()\nprint(conn)\n\n# sql='select * from user'\n#\n# res=conn.execute(sql)\n# print(res)\n# datas=res.fetchall()\n# print(datas)\n# 使用MetaData映射创建数据库表\nmata=MetaData(engine)\npeople=Table(\n 'people',mata,\n Column('id',Integer,primary_key=Table,autoincrement=True),\n Column('name',String(50),nullable=False)\n)\n# 将people的表模型映射到数据库\n# people.create()\n# select * from user where id=1 and name=zhangsan\nsql1=select([people]).where(tuple_(people.c.id).in_('1'))\nprint(sql1)\nres=conn.execute(sql1)\nprint(res)\ndatas=res.fetchall()\nprint(datas)\n","sub_path":"flask/pymysql/pymysql_demo1.py","file_name":"pymysql_demo1.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"204537064","text":"#!/usr/bin/python3\n'''\n (C) Copyright 2019-2021 Intel Corporation.\n\n SPDX-License-Identifier: BSD-2-Clause-Patent\n'''\nfrom ec_utils import ErasureCodeFio, check_aggregation_status\nfrom apricot import skipForTicket\n\nclass EcodFioRebuild(ErasureCodeFio):\n # pylint: disable=too-many-ancestors\n # pylint: disable=protected-access\n \"\"\"Test class Description: Runs Fio with EC object type over POSIX and\n verify on-line, off-line for rebuild and verify the data.\n\n :avocado: recursive\n \"\"\"\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize a EcodFioRebuild object.\"\"\"\n super().__init__(*args, **kwargs)\n self.set_online_rebuild = False\n self.rank_to_kill = None\n self.read_option = 
self.params.get(\"rw_read\", \"/run/fio/test/read_write/*\")\n\n def execution(self, rebuild_mode):\n \"\"\"\n Test execution\n\n Args:\n rebuild_mode: On-line or off-line rebuild mode\n \"\"\"\n # Kill last server rank first\n self.rank_to_kill = self.server_count - 1\n\n if 'on-line' in rebuild_mode:\n # Enabled on-line rebuild for the test\n self.set_online_rebuild = True\n\n # Write the Fio data and kill server if rebuild_mode is on-line\n self.start_online_fio()\n\n # Verify Aggregation should start for Partial stripes IO\n if not any(check_aggregation_status(self.pool).values()):\n self.fail(\"Aggregation failed to start..\")\n\n if 'off-line' in rebuild_mode:\n self.server_managers[0].stop_ranks(\n [self.server_count - 1], self.d_log, force=True)\n\n # Read and verify the original data.\n self.fio_cmd._jobs['test'].rw.value = self.read_option\n self.fio_cmd.run()\n\n # If RF is 2 kill one more server and validate the data is not corrupted.\n if int(self.container.properties.value.split(\":\")[1]) == 2:\n self.log.info(\"RF is 2,So kill another server and verify data\")\n # Kill one more server rank\n self.server_managers[0].stop_ranks([self.server_count - 2],\n self.d_log, force=True)\n # Read and verify the original data.\n self.fio_cmd.run()\n\n @skipForTicket(\"DAOS-8870\")\n def test_ec_online_rebuild_fio(self):\n \"\"\"Jira ID: DAOS-7320.\n\n Test Description:\n Verify the EC works for Fio during on-line rebuild.\n\n Use Cases:\n Create the container with RF:1 or 2.\n Create the Fio data file with verify pattern over Fuse.\n Kill the server when Write is in progress.\n Verify the Fio write finish without any error.\n Wait and verify Aggregation is getting triggered.\n Read and verify the data after Aggregation.\n Kill one more rank and verify the data after rebuild finish.\n\n :avocado: tags=all,full_regression\n :avocado: tags=hw,large,ib2\n :avocado: tags=ec,ec_array,fio,ec_online_rebuild\n :avocado: tags=ec_online_rebuild_fio\n \"\"\"\n self.execution('on-line')\n\n @skipForTicket(\"DAOS-8640\")\n def test_ec_offline_rebuild_fio(self):\n \"\"\"Jira ID: DAOS-7320.\n\n Test Description:\n Verify the EC works for Fio, for off-line rebuild.\n\n Use Cases:\n Create the container with RF:1 or 2.\n Create the Fio data file with verify pattern over Fuse.\n Kill the server and wait for rebuild to finish.\n Wait and verify Aggregation is getting triggered.\n Kill one more rank and verify the data after rebuild finish.\n\n :avocado: tags=all,full_regression\n :avocado: tags=hw,large,ib2\n :avocado: tags=ec,ec_array,fio,ec_offline_rebuild\n :avocado: tags=ec_offline_rebuild_fio\n \"\"\"\n self.execution('off-line')\n","sub_path":"src/tests/ftest/erasurecode/rebuild_fio.py","file_name":"rebuild_fio.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"166312279","text":"import gym\nimport numpy as np\n\nfrom common.common_gym_observation import run_actor_critic_continuous_methods, run_heuristics\n\nenv = gym.make('BipedalWalker-v3')\nrun_actor_critic_continuous_methods(env, 'bipedal_walker')\n\n\ndef bipdeal_walker_heuristic(self, observation):\n moving_s_base = 4 + 5 * self.moving_leg\n supporting_s_base = 4 + 5 * self.supporting_leg\n\n hip_targ = [None, None] # -0.8 .. +1.1\n knee_targ = [None, None] # -0.6 .. 
+0.9\n hip_todo = [0.0, 0.0]\n knee_todo = [0.0, 0.0]\n\n if self.state == 1: # stay on one leg\n hip_targ[self.moving_leg] = 1.1\n knee_targ[self.moving_leg] = -0.6\n self.supporting_knee_angle += 0.03\n if observation[2] > 0.29: # Max Speed\n self.supporting_knee_angle += 0.03\n self.supporting_knee_angle = min(self.supporting_knee_angle, self.min_supporting_knee_angle)\n knee_targ[self.supporting_leg] = self.supporting_knee_angle\n if observation[supporting_s_base + 0] < 0.10: # supporting leg is behind\n self.state = 2\n if self.state == 2: # Put other down\n hip_targ[self.moving_leg] = 0.1\n knee_targ[self.moving_leg] = self.min_supporting_knee_angle\n knee_targ[self.supporting_leg] = self.supporting_knee_angle\n if observation[moving_s_base + 4]:\n self.state = 3\n self.supporting_knee_angle = min(observation[moving_s_base + 2], self.min_supporting_knee_angle)\n if self.state == 3: # Push Off\n knee_targ[self.moving_leg] = self.supporting_knee_angle\n knee_targ[self.supporting_leg] = +1.0\n if observation[supporting_s_base + 2] > 0.88 or observation[2] > 1.2 * 0.29:\n self.state = 1\n self.moving_leg = 1 - self.moving_leg\n self.supporting_leg = 1 - self.moving_leg\n\n if hip_targ[0]:\n hip_todo[0] = 0.9 * (hip_targ[0] - observation[4]) - 0.25 * observation[5]\n if hip_targ[1]:\n hip_todo[1] = 0.9 * (hip_targ[1] - observation[9]) - 0.25 * observation[10]\n if knee_targ[0]:\n knee_todo[0] = 4.0 * (knee_targ[0] - observation[6]) - 0.25 * observation[7]\n if knee_targ[1]:\n knee_todo[1] = 4.0 * (knee_targ[1] - observation[11]) - 0.25 * observation[12]\n\n hip_todo[0] -= 0.9 * (0 - observation[0]) - 1.5 * observation[1] # PID to keep head strait\n hip_todo[1] -= 0.9 * (0 - observation[0]) - 1.5 * observation[1]\n knee_todo[0] -= 15.0 * observation[3] # vertical speed, to damp oscillations\n knee_todo[1] -= 15.0 * observation[3]\n\n a = np.zeros((4,))\n\n a[0] = hip_todo[0]\n a[1] = knee_todo[0]\n a[2] = hip_todo[1]\n a[3] = knee_todo[1]\n a = np.clip(0.5 * a, -1.0, 1.0)\n\n return a\n\n\nrun_heuristics(env, 'bipedal_walker', bipdeal_walker_heuristic, 0, None, state=1, moving_leg=0, supporting_leg=1,\n supporting_knee_angle=0.1, min_supporting_knee_angle=0.1)\n","sub_path":"openai_gym/bipedal_walker.py","file_name":"bipedal_walker.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"76114608","text":"from sqlalchemy import Column, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\n\n\nDeclarativeBase = declarative_base()\n\n\nclass Article(DeclarativeBase):\n\n __tablename__ = \"news\"\n\n id = Column(Integer, primary_key=True)\n title = Column('title', String)\n author = Column('author', String)\n site = Column('site', String)\n url = Column('url', String)\n\n def __repr__(self):\n return \"\".format(self.url, self.title)\n","sub_path":"combinator/api/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"25176363","text":"# build base sub agent\nimport sys\nimport random\nimport math\nimport time\nimport os.path\nimport datetime\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nfrom pysc2.lib import actions\n\nfrom utils import BaseAgent\n\n#sub-agents\nfrom agent_base_attack import BaseAttack\nfrom agent_army_attack import ArmyAttack\n\n# shared data\nfrom agent_base_attack import SharedDataBaseAttack\nfrom agent_army_attack 
import SharedDataArmyAttack\n\n# sc2 utils\nfrom utils import TerranUnit\nfrom utils import SC2_Params\nfrom utils import SC2_Actions\n\n#decision makers\nfrom utils_decisionMaker import LearnWithReplayMngr\nfrom utils_decisionMaker import UserPlay\nfrom utils_decisionMaker import BaseDecisionMaker\n\n\nfrom utils_results import ResultFile\nfrom utils_results import PlotResults\n\n# params\nfrom utils_dqn import DQN_PARAMS\nfrom utils_dqn import DQN_EMBEDDING_PARAMS\nfrom utils_qtable import QTableParams\nfrom utils_qtable import QTableParamsExplorationDecay\n\nfrom utils import SwapPnt\nfrom utils import DistForCmp\nfrom utils import CenterPoints\n\nSTEP_DURATION = 0\n\n# possible types of play\nAGENT_DIR = \"BattleMngr/\"\nAGENT_NAME = \"battle_mngr\"\n\nQTABLE = 'q'\nDQN = 'dqn'\nDQN_EMBEDDING_LOCATIONS = 'dqn_Embedding' \nNAIVE_DECISION = 'naive'\n\nUSER_PLAY = 'play'\n\nALL_TYPES = set([USER_PLAY, QTABLE, DQN, DQN_EMBEDDING_LOCATIONS, NAIVE_DECISION])\n\nGRID_SIZE = 5\n\nACTION_DO_NOTHING = 0\nACTION_ARMY_BATTLE = 1\nACTION_BASE_BATTLE = 2\nNUM_ACTIONS = 3\n\nSUB_AGENT_ARMY_BATTLE = ACTION_ARMY_BATTLE\nSUB_AGENT_BASE_BATTLE = ACTION_BASE_BATTLE\nALL_SUB_AGENTS = [SUB_AGENT_ARMY_BATTLE, SUB_AGENT_BASE_BATTLE]\n\nSUBAGENTS_NAMES = {}\nSUBAGENTS_NAMES[SUB_AGENT_ARMY_BATTLE] = \"ArmyAttack\"\nSUBAGENTS_NAMES[SUB_AGENT_BASE_BATTLE] = \"BaseAttack\"\n\n\nACTION2STR = {}\nACTION2STR[ACTION_DO_NOTHING] = \"Do_Nothing\"\nACTION2STR[ACTION_ARMY_BATTLE] = \"Army_Battle\"\nACTION2STR[ACTION_BASE_BATTLE] = \"Base_Battle\"\n\nclass STATE:\n START_SELF_MAT = 0\n END_SELF_MAT = GRID_SIZE * GRID_SIZE\n \n START_ENEMY_ARMY_MAT = END_SELF_MAT\n END_ENEMY_ARMY_MAT = START_ENEMY_ARMY_MAT + GRID_SIZE * GRID_SIZE\n \n START_ENEMY_BUILDING_MAT = END_ENEMY_ARMY_MAT\n END_ENEMY_BUILDING_MAT = START_ENEMY_BUILDING_MAT + GRID_SIZE * GRID_SIZE\n\n TIME_LINE_IDX = END_ENEMY_BUILDING_MAT\n\n SIZE = TIME_LINE_IDX + 1\n\n TIME_LINE_BUCKETING = 25\n\n\n# data for run type\nTYPE = \"type\"\nDECISION_MAKER_NAME = \"dm_name\"\nHISTORY = \"hist\"\nRESULTS = \"results\"\nPARAMS = 'params'\nDIRECTORY = 'directory'\n\n# table names\nRUN_TYPES = {}\n\n\nRUN_TYPES[QTABLE] = {}\nRUN_TYPES[QTABLE][TYPE] = \"QLearningTable\"\nRUN_TYPES[QTABLE][DIRECTORY] = \"battleMngr_qtable\"\nRUN_TYPES[QTABLE][PARAMS] = QTableParamsExplorationDecay(STATE.SIZE, NUM_ACTIONS)\nRUN_TYPES[QTABLE][DECISION_MAKER_NAME] = \"battleMngr_q_qtable\"\nRUN_TYPES[QTABLE][HISTORY] = \"battleMngr_q_replayHistory\"\nRUN_TYPES[QTABLE][RESULTS] = \"battleMngr_q_result\"\n\nRUN_TYPES[DQN] = {}\nRUN_TYPES[DQN][TYPE] = \"DQN_WithTarget\"\nRUN_TYPES[DQN][DIRECTORY] = \"battleMngr_dqn\"\nRUN_TYPES[DQN][PARAMS] = DQN_PARAMS(STATE.SIZE, NUM_ACTIONS)\nRUN_TYPES[DQN][DECISION_MAKER_NAME] = \"battleMngr_dqn_DQN\"\nRUN_TYPES[DQN][HISTORY] = \"battleMngr_dqn_replayHistory\"\nRUN_TYPES[DQN][RESULTS] = \"battleMngr_dqn_result\"\n\nRUN_TYPES[DQN_EMBEDDING_LOCATIONS] = {}\nRUN_TYPES[DQN_EMBEDDING_LOCATIONS][TYPE] = \"DQN_WithTarget\"\nRUN_TYPES[DQN][DIRECTORY] = \"battleMngr_dqn_Embedding\"\nRUN_TYPES[DQN_EMBEDDING_LOCATIONS][PARAMS] = DQN_EMBEDDING_PARAMS(STATE.SIZE, STATE.END_ENEMY_BUILDING_MAT, NUM_ACTIONS)\nRUN_TYPES[DQN_EMBEDDING_LOCATIONS][DECISION_MAKER_NAME] = \"battleMngr_dqn_Embedding_DQN\"\nRUN_TYPES[DQN_EMBEDDING_LOCATIONS][HISTORY] = \"battleMngr_dqn_Embedding_replayHistory\"\nRUN_TYPES[DQN_EMBEDDING_LOCATIONS][RESULTS] = \"battleMngr_dqn_Embedding_result\"\n\n\n\nclass SharedDataBattle(SharedDataArmyAttack, SharedDataBaseAttack):\n def __init__(self):\n 
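        # descriptive note: this is cooperative multiple inheritance; the single
        # super().__init__() call below walks the MRO (SharedDataArmyAttack first, then
        # SharedDataBaseAttack), so both parents' shared fields are initialised exactly
        # once, provided each parent also chains through super() rather than naming its
        # base class explicitly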
super(SharedDataBattle, self).__init__()\n\n\nclass NaiveDecisionMakerBattleMngr(BaseDecisionMaker):\n def __init__(self):\n super(NaiveDecisionMakerBattleMngr, self).__init__(AGENT_NAME) \n self.startEnemyMat = GRID_SIZE * GRID_SIZE\n self.startBuildingMat = 2 * GRID_SIZE * GRID_SIZE\n self.endBuildingMat = 3 * GRID_SIZE * GRID_SIZE\n\n self.numActions = 3\n\n def choose_action(self, observation):\n if (observation[self.startEnemyMat:self.startBuildingMat] > 0).any():\n return ACTION_ARMY_BATTLE\n elif (observation[self.startBuildingMat:self.endBuildingMat] > 0).any():\n return ACTION_BASE_BATTLE\n else:\n return ACTION_DO_NOTHING\n\n def ActionValuesVec(self, state, target = True):\n vals = np.zeros(self.numActions,dtype = float)\n vals[self.choose_action(state)] = 1.0\n\n return vals\n\n\n\nclass BattleMngr(BaseAgent):\n def __init__(self, sharedData, dmTypes, decisionMaker, isMultiThreaded, playList, trainList): \n super(BattleMngr, self).__init__(STATE.SIZE)\n self.playAgent = (AGENT_NAME in playList) | (\"inherit\" in playList)\n if self.playAgent:\n saPlayList = [\"inherit\"]\n else:\n saPlayList = playList\n\n self.trainAgent = AGENT_NAME in trainList\n\n self.illigalmoveSolveInModel = True\n\n if decisionMaker != None:\n self.decisionMaker = decisionMaker\n else:\n self.decisionMaker = self.CreateDecisionMaker(dmTypes, isMultiThreaded)\n\n self.history = self.decisionMaker.AddHistory()\n\n self.sharedData = sharedData\n self.subAgents = {}\n for key, name in SUBAGENTS_NAMES.items():\n saClass = eval(name)\n saDM = self.decisionMaker.GetSubAgentDecisionMaker(key)\n self.subAgents[key] = saClass(sharedData, dmTypes, saDM, isMultiThreaded, saPlayList, trainList)\n self.decisionMaker.SetSubAgentDecisionMaker(key, self.subAgents[key].GetDecisionMaker())\n\n if not self.playAgent:\n self.subAgentPlay = self.FindActingHeirarchi()\n self.activeSubAgents = [self.subAgentPlay]\n else: \n self.activeSubAgents = ALL_SUB_AGENTS\n\n\n self.current_action = None\n self.armyExist = True\n self.buildingsExist = True\n # state and actions:\n\n self.state_startSelfMat = 0\n self.state_startEnemyMat = GRID_SIZE * GRID_SIZE\n self.state_startBuildingMat = 2 * GRID_SIZE * GRID_SIZE\n self.state_timeLineIdx = 3 * GRID_SIZE * GRID_SIZE\n\n self.state_size = 3 * GRID_SIZE * GRID_SIZE + 1\n\n self.terminalState = np.zeros(self.state_size, dtype=np.int, order='C')\n \n\n def CreateDecisionMaker(self, dmTypes, isMultiThreaded):\n if dmTypes[AGENT_NAME] == \"naive\":\n decisionMaker = NaiveDecisionMakerBattleMngr()\n else:\n runType = RUN_TYPES[dmTypes[AGENT_NAME]]\n\n # create agent dir\n directory = dmTypes[\"directory\"] + \"/\" + AGENT_DIR\n if not os.path.isdir(\"./\" + directory):\n os.makedirs(\"./\" + directory)\n decisionMaker = LearnWithReplayMngr(modelType=runType[TYPE], modelParams = runType[PARAMS], decisionMakerName = runType[DECISION_MAKER_NAME], agentName=AGENT_NAME, \n resultFileName=runType[RESULTS], historyFileName=runType[HISTORY], directory=AGENT_DIR+runType[DIRECTORY], isMultiThreaded=isMultiThreaded)\n\n return decisionMaker\n\n def GetDecisionMaker(self):\n return self.decisionMaker\n\n def FindActingHeirarchi(self):\n if self.playAgent:\n return 1\n\n for key, sa in self.subAgents.items():\n if sa.FindActingHeirarchi() >= 0:\n return key\n\n return -1\n\n def FirstStep(self, obs): \n super(BattleMngr, self).FirstStep()\n\n self.current_state = np.zeros(self.state_size, dtype=np.int, order='C')\n self.previous_state = np.zeros(self.state_size, dtype=np.int, order='C')\n \n 
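        # descriptive note: current_state packs three GRID_SIZE x GRID_SIZE grids (own
        # army, enemy army, enemy buildings) followed by one time-line slot, matching the
        # STATE class above; a hypothetical sanity check (not in the original) would be:
        # assert self.state_size == 3 * GRID_SIZE * GRID_SIZE + 1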
self.subAgentsActions = {}\n for sa in range(NUM_ACTIONS):\n self.subAgentsActions[sa] = None\n \n for sa in SUBAGENTS_NAMES.keys():\n self.subAgentsActions[sa] = self.subAgents[sa].FirstStep(obs) \n\n def EndRun(self, reward, score, stepNum):\n if self.trainAgent:\n self.decisionMaker.end_run(reward, score, stepNum)\n \n for sa in ALL_SUB_AGENTS:\n self.subAgents[sa].EndRun(reward, score, stepNum)\n\n \n def Learn(self, reward, terminal):\n if self.trainAgent:\n if self.isActionCommitted:\n self.history.learn(self.previous_state, self.lastActionCommitted, reward, self.current_state, terminal)\n elif terminal:\n # if terminal reward entire state if action is not chosen for current step\n for a in range(NUM_ACTIONS):\n self.history.learn(self.previous_state, a, reward, self.terminalState, terminal)\n self.history.learn(self.current_state, a, reward, self.terminalState, terminal)\n\n for sa in self.activeSubAgents:\n self.subAgents[sa].Learn(reward, terminal)\n\n self.previous_state[:] = self.current_state[:]\n self.isActionCommitted = False\n\n def ChooseAction(self):\n for sa in self.activeSubAgents:\n self.subAgents[sa].ChooseAction() \n \n if self.playAgent:\n if self.illigalmoveSolveInModel:\n validActions = self.ValidActions()\n if self.trainAgent:\n targetValues = False\n exploreProb = self.decisionMaker.ExploreProb() \n else:\n targetValues = True\n exploreProb = self.decisionMaker.TargetExploreProb() \n\n if np.random.uniform() > exploreProb:\n valVec = self.decisionMaker.ActionValuesVec(self.current_state, targetValues) \n random.shuffle(validActions)\n validVal = valVec[validActions]\n action = validActions[validVal.argmax()]\n else:\n action = np.random.choice(validActions) \n else:\n action = self.decisionMaker.choose_action(self.current_state)\n else:\n action = self.subAgentPlay\n\n self.current_action = action\n return action\n\n def Action2Str(self, a):\n if a == ACTION_DO_NOTHING:\n return ACTION2STR[a]\n else:\n return ACTION2STR[a] + \"-->\" + self.subAgents[a].Action2Str(self.subAgentsActions[a])\n\n def IsDoNothingAction(self, a):\n return self.subAgents[a].IsDoNothingAction(self.subAgentsActions[a])\n\n def Action2SC2Action(self, obs, moveNum):\n print(\"\\n\\nBattle mngr action\\n\\n\")\n if moveNum == 0:\n self.CreateState(obs)\n self.Learn()\n self.ChooseAction()\n\n self.isActionCommitted = True\n self.lastActionCommitted = self.current_action\n return self.subAgents[self.current_action].Action2SC2Action(obs, self.subAgentsActions[self.current_action], moveNum)\n\n def CreateState(self, obs):\n for sa in ALL_SUB_AGENTS:\n self.subAgents[sa].CreateState()\n\n self.current_state = np.zeros(self.state_size, dtype=np.int, order='C')\n \n self.GetSelfLoc(obs)\n for idx in range(GRID_SIZE * GRID_SIZE):\n self.current_state[STATE.START_ENEMY_BUILDING_MAT + idx] = self.sharedData.enemyBuildingMat[idx]\n self.current_state[STATE.START_ENEMY_ARMY_MAT + idx] = self.sharedData.enemyArmyMat[idx]\n\n #self.GetEnemyBuildingLoc(obs)\n self.current_state[self.state_timeLineIdx] = self.sharedData.numStep\n\n def GetSelfLoc(self, obs):\n playerType = obs.observation[\"feature_screen\"][SC2_Params.PLAYER_RELATIVE]\n unitType = obs.observation[\"feature_screen\"][SC2_Params.UNIT_TYPE]\n\n allArmy_y = []\n allArmy_x = [] \n for key, spec in TerranUnit.ARMY_SPEC.items():\n s_y, s_x = ((playerType == SC2_Params.PLAYER_SELF) &(unitType == key)).nonzero()\n allArmy_y += list(s_y)\n allArmy_x += list(s_x)\n \n selfPoints, selfPower = CenterPoints(s_y, s_x)\n\n\n for i in 
range(len(selfPoints)):\n idx = self.GetScaledIdx(selfPoints[i])\n power = math.ceil(selfPower[i] / spec.numScreenPixels)\n self.current_state[STATE.START_SELF_MAT + idx] += power\n\n if len(allArmy_y) > 0:\n self.selfLocCoord = [int(sum(allArmy_y) / len(allArmy_y)), int(sum(allArmy_x) / len(allArmy_x))]\n\n def GetEnemyArmyLoc(self, obs):\n playerType = obs.observation[\"feature_screen\"][SC2_Params.PLAYER_RELATIVE]\n unitType = obs.observation[\"feature_screen\"][SC2_Params.UNIT_TYPE]\n\n enemyPoints = []\n enemyPower = []\n for unit, spec in TerranUnit.ARMY_SPEC.items():\n enemyArmy_y, enemyArmy_x = ((unitType == unit) & (playerType == SC2_Params.PLAYER_HOSTILE)).nonzero()\n unitPoints, unitPower = CenterPoints(enemyArmy_y, enemyArmy_x, spec.numScreenPixels)\n enemyPoints += unitPoints\n enemyPower += unitPower\n \n self.armyExist = False\n for i in range(len(enemyPoints)):\n self.armyExist = True\n idx = self.GetScaledIdx(enemyPoints[i])\n self.current_state[self.state_startEnemyMat + idx] += enemyPower[i]\n\n def GetEnemyBuildingLoc(self, obs):\n playerType = obs.observation[\"feature_screen\"][SC2_Params.PLAYER_RELATIVE]\n unitType = obs.observation[\"feature_screen\"][SC2_Params.UNIT_TYPE]\n\n enemyBuildingPoints = []\n enemyBuildingPower = []\n for unit, spec in TerranUnit.BUILDING_SPEC.items():\n enemyArmy_y, enemyArmy_x = ((unitType == unit) & (playerType == SC2_Params.PLAYER_HOSTILE)).nonzero()\n buildingPoints, buildingPower = CenterPoints(enemyArmy_y, enemyArmy_x, spec.numScreenPixels)\n enemyBuildingPoints += buildingPoints\n enemyBuildingPower += buildingPower # * self.BuildingValues[spec.name]\n \n\n \n self.buildingsExist = False\n for i in range(len(enemyBuildingPoints)):\n self.buildingsExist = True\n idx = self.GetScaledIdx(enemyBuildingPoints[i])\n self.current_state[self.state_startBuildingMat + idx] += enemyBuildingPower[i]\n \n \n def GetScaledIdx(self, screenCord):\n locX = screenCord[SC2_Params.X_IDX]\n locY = screenCord[SC2_Params.Y_IDX]\n\n yScaled = int((locY / SC2_Params.SCREEN_SIZE) * GRID_SIZE)\n xScaled = int((locX / SC2_Params.SCREEN_SIZE) * GRID_SIZE)\n\n return xScaled + yScaled * GRID_SIZE\n \n def Closest2Self(self, p1, p2):\n d1 = DistForCmp(p1, self.selfLocCoord)\n d2 = DistForCmp(p2, self.selfLocCoord)\n if d1 < d2:\n return p1\n else:\n return p2\n \n def ValidActions(self):\n valid = [ACTION_DO_NOTHING]\n if self.armyExist:\n valid.append(ACTION_ARMY_BATTLE)\n if self.buildingsExist:\n valid.append(ACTION_BASE_BATTLE)\n \n return valid\n\n def PrintState(self):\n print(\"\\nAttack action =\", self.Action2Str())\n print(\"\\n\\nstate: timeline =\", self.current_state[self.state_timeLineIdx])\n for y in range(GRID_SIZE):\n for x in range(GRID_SIZE):\n idx = self.state_startSelfMat + x + y * GRID_SIZE\n if self.current_state[idx] < 10:\n print(self.current_state[idx], end = ' ')\n else:\n print(self.current_state[idx], end = ' ')\n \n print(end = ' | ')\n \n for x in range(GRID_SIZE):\n idx = self.state_startEnemyMat + x + y * GRID_SIZE\n if self.current_state[idx] < 10:\n print(self.current_state[idx], end = ' ')\n else:\n print(self.current_state[idx], end = ' ')\n\n print(end = ' | ')\n \n for x in range(GRID_SIZE):\n idx = self.state_startBuildingMat + x + y * GRID_SIZE\n if self.current_state[idx] < 10:\n print(self.current_state[idx], end = ' ')\n else:\n print(self.current_state[idx], end = ' ')\n\n print('||')\n\n\nif __name__ == \"__main__\":\n if \"results\" in sys.argv:\n PlotResults(AGENT_NAME, AGENT_DIR, 
RUN_TYPES)","sub_path":"agent_battle_mngr.py","file_name":"agent_battle_mngr.py","file_ext":"py","file_size_in_byte":16570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"95213039","text":"# Copyright 2017 Jacques Berger\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport sqlite3\n\n\nclass Database:\n def __init__(self):\n self.connection = None\n\n def get_connection(self):\n if self.connection is None:\n self.connection = sqlite3.connect('db/database.db')\n #self.connection = sqlite3.connect('/home/ju/Documents/Ecole/2018_hivers/web II/INF3005/tp1/tp1/db/database.db')\n return self.connection\n\n\n def disconnect(self):\n if self.connection is not None:\n self.connection.close()\n\n\n def create_matricule(self, matricule, code_de_projet, date_publication, duree):\n connection = self.get_connection()\n connection.execute((\"insert into heures(matricule, code_de_projet, date_publication, duree)\"\n \" values(?, ?, ?, ?)\"), (matricule, code_de_projet, date_publication,\n duree))\n connection.commit()\n\n\n def get_matricule_info(self, matricule):\n cursor = self.get_connection().cursor()\n cursor.execute((\"select * from heures where matricule=?\"),\n (matricule,))\n matricule = cursor.fetchone()\n if matricule is None:\n return None\n else:\n return matricule[0], matricule[1]\n\n\n def get_matricule_dates(self, matricule):\n cursor = self.get_connection().cursor()\n cursor.execute((\"select date_publication from heures where matricule=?\"),\n (matricule,))\n dates = cursor.fetchall()\n if dates is None:\n return None\n else:\n return [date[0] for date in dates]\n\n\n def get_date_du_jour_info(self, matricule, date_du_jour):\n cursor = self.get_connection().cursor()\n cursor.execute((\"select * from heures where matricule=? AND date_publication=?\"),\n (matricule,date_du_jour))\n infos = cursor.fetchall()\n if infos is None:\n return None\n else:\n return [(info[0], info[1], info[2], info[3], info[4]) for info in infos]\n\n\n def get_mois_info(self, matricule, date_du_jour):\n date_debut= date_du_jour+\"-00\"\n date_fin= date_du_jour+\"-31\"\n cursor = self.get_connection().cursor()\n cursor.execute((\"select * from heures where matricule=? AND date_publication BETWEEN ? and ?\"),\n (matricule,date_debut, date_fin))\n infos = cursor.fetchall()\n if infos is None:\n return None\n else:\n return [(info[0], info[1], info[2], info[3], info[4]) for info in infos]\n\n\n def delete_id(self,id):\n connection = self.get_connection()\n cursor = connection.cursor()\n cursor.execute((\"delete from heures where id=? 
\"),(id,))\n        connection.commit()\n\n\n    def update_id(self,request_json):\n        id = request_json['id']\n        duree = request_json['duree']\n        date_publication = request_json['date_publication']\n        code_projet = request_json['code_projet']\n        matricule = request_json['matricule']\n\n        connection = self.get_connection()\n        cursor = connection.cursor()\n        cursor.execute((\"update heures set matricule=?, code_de_projet=?, date_publication =?, duree = ? where id=? \"),(matricule,code_projet,date_publication, duree, id,))\n        connection.commit()\n","sub_path":"tp1/tp1/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"460131589","text":"import RPi.GPIO as GPIO\nimport time\n# PACKET RECEPTION\nimport sys\nfrom flask import Flask\nfrom flask_sockets import Sockets\napp = Flask(__name__)\nsockets = Sockets(app)\n# SERVO CONTROL WITH THE RPi.GPIO LIBRARY\nservoPIN = 17\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(servoPIN, GPIO.OUT)\ncount=30\np = GPIO.PWM(servoPIN, 50)\np.start(7.5)\ndef angle(ang):\n\tduty = 1./18.*(ang) + 1\n\tif duty > 11:\n\t\tduty = 7\n\treturn duty\n\ndef mapazimut(ang_raw, in_min, in_max, out_min, out_max):\n\tangle = (ang_raw-in_min)*(out_max-out_min)/(in_max-in_min) + out_min\n\treturn angle\n\n@sockets.route('/orientation')\ndef echo_socket(ws):\n    try:\n        f = open(\"orientation.txt\", \"a\")\n        while True:\n            message = ws.receive()\n            splitdata = message.split(\",\")\n            azimut = float(splitdata[0])\n            #print(message)\n            ws.send(message)\n            print(message, file=f)  # fixed: was Python 2 print>>f syntax, a SyntaxError in Python 3\n\n            azimap_raw = azimut\n            azimap = int(azimap_raw)\n            desireang = mapazimut(azimap, 44, 240, 30, 180)\n            print(desireang, azimut)\n            p.ChangeDutyCycle(angle(desireang))\n            #time.sleep(0.1)\n    except KeyboardInterrupt:\n        print(\"Bye Bye\")\n        p.stop()\n        GPIO.cleanup()\n    finally:\n        f.close()  # always close the log file; the original close call after the while loop was unreachable\n@app.route('/')\ndef hello():\n\treturn 'Hello World!'\n\nif __name__ == \"__main__\":\n\tfrom gevent import pywsgi\n\tfrom geventwebsocket.handler import WebSocketHandler\n\tserver = pywsgi.WSGIServer(('0.0.0.0', 5000), app, handler_class=WebSocketHandler)\n\tserver.serve_forever()\n","sub_path":"Wallicontrol/Scripts sin uso/servo1.py","file_name":"servo1.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"99564102","text":"import requests\nfrom pyramid.security import (\n    remember, forget,\n    Allow, Authenticated, DENY_ALL,\n)\nimport sqlalchemy as sa\nfrom sqlalchemy.orm import joinedload\nfrom pyramid_rpc.jsonrpc import jsonrpc_method\n\nfrom ..models import (\n    mallows,\n    AuthorizedUser,\n    AuditEntry,\n    Thread,\n    ThreadPage,\n    ThreadPost,\n    Writeup,\n    WriteupPost,\n    WriteupPostVersion,\n    )\n\nfrom ..lib.extract import extract_post_from_wpv\n\n\n@jsonrpc_method(endpoint='api')\ndef whoami(request):\n    return request.authenticated_userid\n\n\n@jsonrpc_method(endpoint='api')\ndef login(request, assertion):\n    audience = request.registry.settings['persona_audience']\n    data = {'assertion': assertion, 'audience': audience}\n    resp = requests.post('https://verifier.login.persona.org/verify', data=data, verify=True)\n    if resp.ok:\n        verification_data = resp.json()\n        if verification_data['status'] == 'okay':\n            email = verification_data['email']\n            au = request.db_session.query(AuthorizedUser).filter_by(email=email).one_or_none()\n            if au is not None:\n                headers = remember(request, email)\n                response = request.response\n                response.headerlist.extend(headers)\n                return {'result': 'ok', 'email':
email}\n raise ValueError('Nope')\n\n\n@jsonrpc_method(endpoint='api')\ndef logout(request):\n headers = forget(request)\n response = request.response\n response.headerlist.extend(headers)\n return {}\n\n\n@jsonrpc_method(endpoint='api', permission='admin')\ndef thread_info(request):\n query = request.db_session.query(Thread).order_by(Thread.id.asc())\n max_tp = request.db_session.query(sa.func.max(WriteupPostVersion.threadpost_id)).scalar()\n tp = request.db_session.query(ThreadPost).filter_by(id=max_tp).one()\n return {\n 'threads': [{\n 'id': t.id,\n 'page_count': t.page_count,\n 'closed': t.closed,\n } for t in query],\n 'last_extracted_post': {\n 'thread_id': tp.page.thread.id,\n 'page_num': tp.page.page_num,\n 'post_id': tp.id,\n }\n }\n\n\n@jsonrpc_method(endpoint='api', permission='admin')\ndef thread_page(request, thread_id, page_num):\n tp = request.db_session.query(ThreadPage)\\\n .filter_by(thread_id=thread_id, page_num=page_num)\\\n .options(joinedload(ThreadPage.posts), joinedload(ThreadPage.thread))\\\n .one()\n schema = mallows.ThreadPost(many=True)\n return {\n 'thread_id': tp.thread_id,\n 'page_num': tp.page_num,\n 'page_count': tp.thread.page_count,\n 'posts': schema.dump(tp.posts).data,\n }\n\n\n@jsonrpc_method(endpoint='api', permission='admin')\ndef extracted_list(request):\n query = request.db_session.query(WriteupPostVersion)\\\n .filter_by(writeup_post=None)\\\n .order_by(WriteupPostVersion.id.asc())\n schema = mallows.WriteupPostVersion(many=True)\n schema.context = {'request': request}\n return schema.dump(query.all()).data\n\n\n@jsonrpc_method(endpoint='api', permission='admin')\ndef writeup_list(request):\n query = request.db_session.query(Writeup)\\\n .order_by(Writeup.title.asc(), Writeup.author_slug.asc())\\\n .options(joinedload(Writeup.posts))\n schema = mallows.Writeup(many=True)\n return schema.dump(query.all()).data\n\n\n@jsonrpc_method(endpoint='api', permission='admin')\ndef writeup_detail(request, id):\n query = request.db_session.query(Writeup)\\\n .filter(Writeup.id == id)\\\n .options(joinedload(Writeup.posts))\n schema = mallows.Writeup()\n return schema.dump(query.one()).data\n\n\n@jsonrpc_method(endpoint='api', permission='admin')\ndef save_writeup(request, writeup):\n obj = request.db_session.query(Writeup)\\\n .filter(Writeup.id == writeup['id'])\\\n .one()\n for unsettable in ['id', 'posts', 'author_slug', 'writeup_slug']:\n if unsettable in writeup:\n del writeup[unsettable]\n for key, value in writeup.items():\n setattr(obj, key, value)\n request.db_session.flush()\n user = request.db_session.query(AuthorizedUser).filter_by(email=request.authenticated_userid).one()\n audit = AuditEntry(\n user=user, timestamp=sa.func.now(),\n text=\"edited {!r}\".format(obj))\n request.db_session.add(audit)\n schema = mallows.Writeup()\n return schema.dump(obj).data\n\n\n@jsonrpc_method(endpoint='api', permission='admin')\ndef post_detail(request, writeup_id, post_index):\n query = request.db_session.query(WriteupPost)\\\n .filter(WriteupPost.writeup_id == writeup_id)\\\n .filter(WriteupPost.index == post_index)\\\n .options(joinedload(WriteupPost.versions))\n schema = mallows.WriteupPost()\n schema.context = {'request': request}\n return schema.dump(query.one()).data\n\n\n@jsonrpc_method(endpoint='api', permission='admin')\ndef save_post(request, post):\n obj = request.db_session.query(WriteupPost).filter(WriteupPost.id == post['id']).one()\n\n # TODO: switch to marshmallow\n for unsettable in ['id', 'versions', 'active_version', 'writeup']:\n if 
unsettable in post:\n del post[unsettable]\n for key, value in post.items():\n setattr(obj, key, value)\n\n request.db_session.flush()\n user = request.db_session.query(AuthorizedUser).filter_by(email=request.authenticated_userid).one()\n audit = AuditEntry(\n user=user, timestamp=sa.func.now(),\n text=\"edited {!r}\".format(obj))\n request.db_session.add(audit)\n schema = mallows.WriteupPost()\n schema.context = {'request': request}\n return schema.dump(obj).data\n\n\n@jsonrpc_method(endpoint='api', permission='admin')\ndef activate_version(request, wpv_id):\n wpv = request.db_session.query(WriteupPostVersion).filter_by(id=wpv_id).one()\n post = wpv.writeup_post\n for version in post.versions:\n if version is not wpv:\n version.active = False\n wpv.active = True\n\n request.db_session.flush()\n user = request.db_session.query(AuthorizedUser).filter_by(email=request.authenticated_userid).one()\n audit = AuditEntry(\n user=user, timestamp=sa.func.now(),\n text=\"activated {!r}\".format(wpv))\n request.db_session.add(audit)\n schema = mallows.WriteupPostVersion()\n schema.context = {'request': request}\n return schema.dump(wpv).data\n\n\n@jsonrpc_method(endpoint='api', permission='admin')\ndef get_wpv(request, wpv_id):\n query = request.db_session.query(WriteupPostVersion)\\\n .filter_by(id=wpv_id)\n schema = mallows.WriteupPostVersion()\n schema.context = {'request': request}\n return schema.dump(query.one()).data\n\n\n@jsonrpc_method(endpoint='api', permission='admin')\ndef save_wpv(request, wpv_data):\n wp = request.db_session.query(WriteupPost).filter_by(id=wpv_data['writeuppost_id']).one()\n tp = request.db_session.query(ThreadPost).filter_by(id=wpv_data['threadpost_id']).one()\n for _wpv in wp.versions:\n _wpv.active = False\n new_version = max([_wpv.version for _wpv in wp.versions]) + 1\n wpv = WriteupPostVersion()\n wpv.writeup_post = wp\n wpv.thread_post = tp\n wpv.html = wpv_data['html']\n wpv.created_at = sa.func.now()\n wpv.version = new_version\n wpv.active = True\n wpv.edit_summary = wpv_data['edit_summary']\n\n request.db_session.flush()\n\n # reprocess images\n extract_post_from_wpv(request, wpv, self_html=True)\n\n user = request.db_session.query(AuthorizedUser).filter_by(email=request.authenticated_userid).one()\n audit = AuditEntry(\n user=user, timestamp=sa.func.now(),\n text=\"edited {!r}\".format(wpv))\n request.db_session.add(audit)\n schema = mallows.WriteupPostVersion()\n schema.context = {'request': request}\n return schema.dump(wpv).data\n\n\n@jsonrpc_method(endpoint='api', permission='admin')\ndef extract_post(request, thread_post_id):\n tp = request.db_session.query(ThreadPost).filter_by(id=thread_post_id).one()\n wpv = WriteupPostVersion(\n thread_post=tp, created_at=sa.func.now(),\n edit_summary=\"Extracted from post {}\".format(tp.id))\n request.db_session.add(wpv)\n extract_post_from_wpv(request, wpv)\n request.db_session.flush()\n user = request.db_session.query(AuthorizedUser).filter_by(email=request.authenticated_userid).one()\n audit = AuditEntry(\n user=user, timestamp=sa.func.now(),\n text=\"created an extracted post from {!r}\".format(tp))\n request.db_session.add(audit)\n schema = mallows.WriteupPostVersion()\n schema.context = {'request': request}\n return schema.dump(wpv).data\n\n\n@jsonrpc_method(endpoint='api', permission='admin')\ndef delete_extracted(request, wpv_id):\n wpv = request.db_session.query(WriteupPostVersion).filter_by(id=wpv_id, writeup_post=None).one()\n user = 
request.db_session.query(AuthorizedUser).filter_by(email=request.authenticated_userid).one()\n audit = AuditEntry(\n user=user, timestamp=sa.func.now(),\n text=\"deleted an extracted post from {!r}\".format(wpv.thread_post))\n request.db_session.add(audit)\n request.db_session.delete(wpv)\n\n\n@jsonrpc_method(endpoint='api', permission='admin')\ndef attach_extracted(request, wpv_id, target):\n user = request.db_session.query(AuthorizedUser).filter_by(email=request.authenticated_userid).one()\n wpv = request.db_session.query(WriteupPostVersion).filter_by(id=wpv_id, writeup_post=None).one()\n\n schema = mallows.InputVersionExistingPost()\n result = schema.load(target)\n if not result.errors:\n w = request.db_session.query(Writeup).filter_by(id=result.data['w_id']).one()\n wp = request.db_session.query(WriteupPost).filter_by(writeup=w, index=result.data['wp_index']).one()\n wpv.version = max([x.version for x in wp.versions]) + 1\n\n wp.versions.append(wpv)\n request.db_session.flush()\n audit = AuditEntry(\n user=user, timestamp=sa.func.now(),\n text=\"attached {!r} to writeup\".format(wpv))\n request.db_session.add(audit)\n schema = mallows.WriteupPostVersion()\n schema.context = {'request': request}\n return schema.dump(wpv).data\n\n schema = mallows.InputVersionNewPost()\n result = schema.load(target)\n if not result.errors:\n w = request.db_session.query(Writeup).filter_by(id=result.data['w_id']).one()\n new_index = len(w.posts) + 1\n wp = WriteupPost(\n author=wpv.thread_post.author,\n index=new_index,\n ordinal='{}'.format(new_index),\n title=result.data['wp_title'],\n published=True\n )\n w.posts.append(wp)\n\n wp.versions.append(wpv)\n wpv.active = True\n request.db_session.flush()\n audit = AuditEntry(\n user=user, timestamp=sa.func.now(),\n text=\"attached {!r} to writeup\".format(wpv))\n request.db_session.add(audit)\n schema = mallows.WriteupPostVersion()\n schema.context = {'request': request}\n return schema.dump(wpv).data\n\n schema = mallows.InputVersionNewWriteup()\n result = schema.load(target)\n if not result.errors:\n w = Writeup(\n author_slug=Writeup.slugify(result.data['w_author']),\n writeup_slug=Writeup.slugify(result.data['w_title']),\n title=result.data['w_title'],\n status='ongoing',\n published=True,\n )\n request.db_session.add(w)\n\n wp = WriteupPost(\n author=wpv.thread_post.author,\n index=1,\n ordinal='1',\n title=result.data['wp_title'],\n published=True\n )\n w.posts.append(wp)\n\n wp.versions.append(wpv)\n wpv.active = True\n request.db_session.flush()\n audit = AuditEntry(\n user=user, timestamp=sa.func.now(),\n text=\"attached {!r} to writeup\".format(wpv))\n request.db_session.add(audit)\n schema = mallows.WriteupPostVersion()\n schema.context = {'request': request}\n retval = schema.dump(wpv).data\n return retval\n raise ValueError(result.errors)\n\n\n@jsonrpc_method(endpoint='api')\ndef say_hello(request, name):\n return \"Hello, {}!\".format(name)\n\n\nclass AdminContext(object):\n __acl__ = [\n (Allow, Authenticated, 'admin'),\n DENY_ALL\n ]\n\n def __init__(self, request):\n pass\n\n\ndef includeme(config):\n config.include('pyramid_rpc.jsonrpc')\n config.add_jsonrpc_endpoint('api', '/api', default_renderer='json', factory=AdminContext)\n config.scan()\n","sub_path":"mimir/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"511558149","text":"import prompt,re\r\nimport math\r\nfrom goody import 
type_as_str\r\n\r\ndef expand_re(pat_dict:{str:str}):\r\n    # expand every #name# reference using the dict's other patterns, in insertion\r\n    # order; each substituted pattern is wrapped in parentheses (this matches the\r\n    # expected results shown in the __main__ block below)\r\n    for name in pat_dict:\r\n        for ref in pat_dict:\r\n            pat_dict[name] = pat_dict[name].replace('#'+ref+'#', '('+pat_dict[ref]+')')\r\n    \r\nclass Point:\r\n    \r\n    def __init__(self,x,y):\r\n        self.x = x\r\n        self.y = y\r\n        if type(self.x) is not int:\r\n            raise AssertionError(\"x({}) is {} and not of type int\".format(self.x, type_as_str(self.x)))\r\n        if type(self.y) is not int:\r\n            raise AssertionError(\"y({}) is {} and not of type int\".format(self.y, type_as_str(self.y)))\r\n\r\n    def __repr__(self):\r\n        return \"Point({},{})\".format(self.x, self.y)\r\n\r\n\r\n    def __str__(self):\r\n        return \"(x={},y={})\".format(self.x, self.y)\r\n    \r\n\r\n    def __bool__(self):\r\n        # only the origin is falsy; the original returned None when x != 0\r\n        if self.x == 0:\r\n            if self.y == 0:\r\n                return False\r\n        return True\r\n\r\n    def __add__(self,right):\r\n        if type(right) is not Point:\r\n            raise TypeError('Point.__add__: right({}) not Point({})'.format(right, type_as_str(right)))\r\n        else:\r\n            return Point(self.x + right.x,self.y + right.y)\r\n    \r\n    \r\n\r\n    def __mul__(self,right):\r\n        if type(right) is not int:\r\n            raise TypeError('Point.__mul__: right({}) not Point({})'.format(right, type_as_str(right)))\r\n        else:\r\n            return Point(self.x*right, self.y*right)\r\n    \r\n    \r\n\r\n    def __rmul__(self,left):\r\n        if type(left) is not int:\r\n            raise TypeError('Point.__rmul__: left({}) not Point({})'.format(left, type_as_str(left)))\r\n        else:\r\n            return Point(left*self.x, left*self.y)\r\n    \r\n\r\n    def __lt__(self,right):\r\n        if type(right) in [int,float]:\r\n            return math.sqrt(self.x**2+self.y**2) < right\r\n        if type(right) is Point:\r\n            return math.sqrt(self.x**2+self.y**2) < math.sqrt(right.x**2+right.y**2)\r\n        else:\r\n            raise TypeError(\"The right operand ({}) is not of type int, float, or Point ({})\".format(right, type_as_str(right)))\r\n    \r\n    def __getitem__(self,index):\r\n        if index == 'x' or index == 0:\r\n            return self.x\r\n        if index == 'y' or index == 1:  # fixed operator precedence; the trailing type check was redundant\r\n            return self.y\r\n        else:\r\n            raise IndexError(\"Point.__getitem__: index({}) {}\".format(index, type_as_str(index)))\r\n    \r\n    \r\n    \r\n    \r\n    \r\n    def __call__(self,x,y):\r\n        if type(x) != int or type(y) != int:\r\n            raise AssertionError(\"Point.__call__: x({}) and y({}) must be ints; got {} and {}\".format(x, y, type_as_str(x), type_as_str(y)))\r\n        else:\r\n            self.x = x\r\n            self.y = y\r\n    \r\n    \r\n\r\n\r\n\r\nfrom collections import defaultdict\r\nclass History:\r\n    def __init__(self):\r\n        self.history = defaultdict(list)\r\n    \r\n    def __getattr__(self,name):\r\n        pass\r\n    \r\n\r\n    \r\n    def __setattr__(self,name,value):\r\n        pass \r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    \r\n    if prompt.for_bool('Test expand?',True):\r\n        pd = dict(digit=r'\\d', integer=r'[=-]?#digit##digit#*')\r\n        expand_re(pd)\r\n        print('result =',pd)\r\n        # computes and prints the dictionary {'digit': '\\\\d', 'integer': '[=-]?(\\\\d)(\\\\d)*'}\r\n        \r\n        pd = dict(integer       =r'[+-]?\\d+',\r\n                  integer_range =r'#integer#(..#integer#)?',\r\n                  integer_list  =r'#integer_range#(?,#integer_range#)*',\r\n                  integer_set   =r'{#integer_list#?}')\r\n        expand_re(pd)\r\n        print('result =',pd)\r\n        # computes and prints the dictionary \r\n        # {'integer'      : '[+-]?\\\\d+',\r\n        #  'integer_range': '([+-]?\\\\d+)(..([+-]?\\\\d+))?',\r\n        #  'integer_list' : '(([+-]?\\\\d+)(..([+-]?\\\\d+))?)(?,(([+-]?\\\\d+)(..([+-]?\\\\d+))?))*', \r\n        #  'integer_set'  : '{((([+-]?\\\\d+)(..([+-]?\\\\d+))?)(?,(([+-]?\\\\d+)(..([+-]?\\\\d+))?))*)?}'\r\n        # }\r\n        \r\n        pd = dict(a='correct',b='#a#',c='#b#',d='#c#',e='#d#',f='#e#',g='#f#')\r\n        expand_re(pd)\r\n        print('result =',pd)\r\n        # computes and prints the dictionary 
\r\n        # {'d': '(((correct)))',\r\n        #  'c': '((correct))',\r\n        #  'b': '(correct)',\r\n        #  'a': 'correct',\r\n        #  'g': '((((((correct))))))',\r\n        #  'f': '(((((correct)))))',\r\n        #  'e': '((((correct))))'\r\n        # }\r\n    \r\n    import driver\r\n    driver.driver()\r\n","sub_path":"ICS33/q3helper/q3solution.py","file_name":"q3solution.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"98606553","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport time  # time: used to show the agent's movement second by second\nimport numpy as np  # numpy: matrix operations\nimport tkinter as tk  # tkinter: builds the GUI\nfrom PIL import ImageTk, Image  # PIL: image handling\n\nPhotoImage = ImageTk.PhotoImage  # alias for creating PhotoImage instances\nUNIT = 50  # pixels per grid cell\nHEIGHT = 5  # grid height\nWIDTH = 5  # grid width\n\nnp.random.seed(1)  # fix the random seed\n\n\nclass Env(tk.Tk):  # Env class defining the environment\n    def __init__(self, render_speed = 0.01):  # constructor (sets the rendering speed)\n        super(Env, self).__init__()  # lets a subclass using multiple inheritance call the correct next superclass in the MRO\n        self.render_speed = render_speed  # initialize the rendering speed\n        self.action_space = ['u', 'd', 'l', 'r']  # initialize the action space\n        self.action_size = len(self.action_space)  # initialize action_size\n        self.title('REINFORCE')  # set the GUI title\n        self.geometry('{0}x{1}'.format(WIDTH * UNIT, HEIGHT * UNIT))  # fixed: geometry is width x height (original used HEIGHT twice)\n        self.shapes = self.load_images()  # read the images via load_images\n        self.canvas = self._build_canvas()  # build the canvas\n        self.counter = 0  # initialize the step counter\n        self.rewards = []  # initialize the rewards\n        self.goal = []  # initialize the goal\n        # place the obstacles\n        self.set_reward([0, 1], -1)\n        self.set_reward([1, 2], -1)\n        self.set_reward([2, 3], -1)\n        # place the goal\n        self.set_reward([4, 4], 1)\n\n    def _build_canvas(self):  # build the tkinter canvas\n        canvas = tk.Canvas(self, bg='white',\n                           height=HEIGHT * UNIT,\n                           width=WIDTH * UNIT)  # create the canvas: white background, height and width are each 5 * 50 = 250\n        # draw the grid\n        for c in range(0, WIDTH * UNIT, UNIT):  # vertical grid lines\n            x0, y0, x1, y1 = c, 0, c, HEIGHT * UNIT\n            canvas.create_line(x0, y0, x1, y1)\n        for r in range(0, HEIGHT * UNIT, UNIT):  # horizontal grid lines\n            x0, y0, x1, y1 = 0, r, HEIGHT * UNIT, r\n            canvas.create_line(x0, y0, x1, y1)\n\n        self.rewards = []  # reset the rewards\n        self.goal = []  # reset the goal\n        # add the agent image to the canvas\n        x, y = UNIT/2, UNIT/2\n        self.rectangle = canvas.create_image(x, y, image=self.shapes[0])  # draw the rectangle (agent)\n\n        # finish the canvas with pack()\n        canvas.pack()\n\n        return canvas\n\n    def load_images(self):  # load the image files as PhotoImage objects\n        rectangle = PhotoImage(\n            Image.open(\"rectangle.png\").resize((30, 30)))\n        triangle = PhotoImage(\n            Image.open(\"triangle.png\").resize((30, 30)))\n        circle = PhotoImage(\n            Image.open(\"circle.png\").resize((30, 30)))\n\n        return rectangle, triangle, circle  # return the created PhotoImage objects\n\n    def reset_reward(self):  # reset obstacle and goal positions after an episode\n\n        for reward in self.rewards:  # delete the canvas items stored in rewards\n            self.canvas.delete(reward['figure'])\n\n        self.rewards.clear()  # clear the rewards (3 obstacles, 1 goal)\n        self.goal.clear()  # clear the goal (goal only)\n        self.set_reward([0, 1], -1)  # reset the obstacle positions\n        self.set_reward([1, 2], -1)\n        self.set_reward([2, 3], -1)\n\n        # reset the goal position\n        self.set_reward([4, 4], 1)\n\n    def set_reward(self, state, reward):  # store a position and its reward in self.rewards\n        state = [int(state[0]), int(state[1])]  # keep the position as a list\n        x = int(state[0])  # position on the canvas\n        y = int(state[1])\n        temp = {}  # temporary dict holding the reward, canvas image, direction, etc.\n        if reward > 0:  # the reward
 is positive (the goal)\n            temp['reward'] = reward  # store the reward under the 'reward' key\n            temp['figure'] = self.canvas.create_image((UNIT * x) + UNIT / 2,\n                                                      (UNIT * y) + UNIT / 2,\n                                                      image=self.shapes[2])  # store the canvas image in 'figure'\n\n            self.goal.append(temp['figure'])  # remember the goal's canvas image\n\n\n        elif reward < 0:  # the reward is negative (an obstacle)\n            temp['direction'] = -1  # store the direction as -1\n            temp['reward'] = reward  # store the reward\n            temp['figure'] = self.canvas.create_image((UNIT * x) + UNIT / 2,\n                                                      (UNIT * y) + UNIT / 2,\n                                                      image=self.shapes[1])  # store the image\n\n        temp['coords'] = self.canvas.coords(temp['figure'])  # store the canvas image's bounding box (x1, y1, x2, y2)\n        temp['state'] = state  # store the state\n        self.rewards.append(temp)  # append all the info to the rewards list\n\n    # new methods\n\n    def check_if_reward(self, state):  # given a state, check whether the goal has been reached\n        check_list = dict()  # define the dict\n        check_list['if_goal'] = False  # initialize the flag\n        rewards = 0  # initialize the reward\n\n        for reward in self.rewards:  # iterate over the 3 obstacles and the goal\n            if reward['state'] == state:  # the current position matches an obstacle or the goal\n                rewards += reward['reward']  # collect the reward\n                if reward['reward'] == 1:  # the goal was reached, so set if_goal to True\n                    check_list['if_goal'] = True\n\n        check_list['rewards'] = rewards  # store the reward\n\n        return check_list\n\n    def coords_to_state(self, coords):  # convert a canvas position to a gridworld position\n        x = int((coords[0] - UNIT / 2) / UNIT)  # compute the x position\n        y = int((coords[1] - UNIT / 2) / UNIT)  # compute the y position\n        return [x, y]\n\n    def reset(self):  # reset the environment\n        self.update()  # update the widget\n        time.sleep(0.5)  # pause for 0.5 seconds\n        x, y = self.canvas.coords(self.rectangle)  # current position of the agent\n        self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)  # move the agent back to the start on the canvas\n        self.reset_reward()  # reset obstacles and goal via reset_reward\n        return self.get_state()  # compute the full state via get_state\n\n    def step(self, action):  # advance one time step\n        self.counter += 1  # increment the time step count\n        self.render()  # render (brief sleep)\n\n        if self.counter % 2 == 1:  # move the obstacles every other step\n            self.rewards = self.move_rewards()\n\n        next_coords = self.move(self.rectangle, action)  # compute the rectangle's position after the action\n        check = self.check_if_reward(self.coords_to_state(next_coords))  # check whether the goal was reached\n        done = check['if_goal']  # goal flag\n        reward = check['rewards']  # collected reward\n\n        self.canvas.tag_raise(self.rectangle)  # keep the moved rectangle on the canvas's top level\n\n        s_ = self.get_state()\n\n        return s_, reward, done\n\n    def get_state(self):  # combine all the info into the state\n\n        location = self.coords_to_state(self.canvas.coords(self.rectangle))  # current gridworld position via coords_to_state\n        agent_x = location[0]  # x coordinate\n        agent_y = location[1]  # y coordinate\n\n        states = list()  # initialize the state list\n\n        for reward in self.rewards:  # read the obstacle and goal info\n            reward_location = reward['state']  # gridworld position of the obstacle (or goal)\n            states.append(reward_location[0] - agent_x)  # position relative to the agent, x\n            states.append(reward_location[1] - agent_y)  # position relative to the agent, y\n            if reward['reward'] < 0:  # the reward is negative\n                states.append(-1)  # append -1\n                states.append(reward['direction'])  # and the direction\n            else:  # the reward is positive\n                states.append(1)  # only append 1, since this is the goal\n\n        return states  # return the state list\n\n    def move_rewards(self):  # recompute the rewards after a step\n        new_rewards = []  # declare the reward list\n        for temp in self.rewards:  # read the existing rewards\n            if temp['reward'] == 1:  # the goal does not move\n                new_rewards.append(temp)\n                continue\n            temp['coords'] = self.move_const(temp)  # recompute the moved obstacle's canvas position via move_const\n            temp['state'] = self.coords_to_state(temp['coords'])  # convert canvas coordinates to gridworld coordinates\n            
new_rewards.append(temp)  # store the obstacle info\n        return new_rewards  # return the new rewards\n\n    def move_const(self, target):  # decide how an obstacle moves\n\n        s = self.canvas.coords(target['figure'])  # position of the obstacle\n\n        base_action = np.array([0, 0])  # initialize the action array\n\n        if s[0] == (WIDTH - 1) * UNIT + UNIT / 2:  # the obstacle reached the right edge\n            target['direction'] = 1  # so it will move left\n        elif s[0] == UNIT / 2:  # the obstacle reached the left edge\n            target['direction'] = -1  # so it will move right\n\n        if target['direction'] == -1:  # move right (x increases)\n            base_action[0] += UNIT\n        elif target['direction'] == 1:  # move left (x decreases)\n            base_action[0] -= UNIT\n\n        if (target['figure'] is not self.rectangle  # the obstacle is not the agent\n                and s == [(WIDTH - 1) * UNIT, (HEIGHT - 1) * UNIT]):  # and it sits on the goal position\n            base_action = np.array([0, 0])  # do not move\n\n        self.canvas.move(target['figure'], base_action[0], base_action[1])  # move the obstacle on the canvas\n\n        s_ = self.canvas.coords(target['figure'])  # store the new position\n\n        return s_\n\n    def move(self, target, action):  # move the rectangle according to the action and return its position\n        s = self.canvas.coords(target)  # current rectangle position\n\n        base_action = np.array([0, 0])  # initialize the action\n\n        if action == 0:  # up\n            if s[1] > UNIT:\n                base_action[1] -= UNIT\n        elif action == 1:  # down\n            if s[1] < (HEIGHT - 1) * UNIT:\n                base_action[1] += UNIT\n        elif action == 2:  # right\n            if s[0] < (WIDTH - 1) * UNIT:\n                base_action[0] += UNIT\n        elif action == 3:  # left\n            if s[0] > UNIT:\n                base_action[0] -= UNIT\n\n        self.canvas.move(target, base_action[0], base_action[1])  # move the rectangle on the canvas\n\n        s_ = self.canvas.coords(target)  # store the new position\n\n        return s_\n\n    def render(self):  # control the rendering speed\n        time.sleep(0.07)  # short pause each frame (render_speed is stored but a fixed 0.07 s is used here)\n        self.update()  # update the GUI\n\n","sub_path":"week13/Reinforce/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":11849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"441251686","text":"import numpy as np\nimport copy as cp\nimport time\nimport ncon\n\n########################################################################################################################\n#                                                                                                                      #\n#                                          DOUBLE-EDGE FACTOR GRAPH (DEFG) CLASS                                       #\n#                                                                                                                      #\n########################################################################################################################\n\n\nclass defg:\n\n    def __init__(self, number_of_nodes=None):\n        self.nCounter = 0\n        self.factors = {}\n        self.nodes_InsertOrder = []\n        self.nodes_indices = {}\n        self.nodes = {}\n        self.nodesBeliefs = None\n        self.rdms_dotProduct = None\n        self.twoBodyRDMS = None\n        self.factorsBeliefs = None\n        self.fCounter = 0\n        self.messages_n2f = None\n        self.messages_f2n = None\n        self.node_partition = None\n        self.factor_partition = None\n        self.all_messages = None\n        self.rdms_broadcasting = None\n\n    def add_node(self, alphabet, nName):\n        \"\"\"\n        Adding a node to the double-edge factor graph (DEFG)\n        :param alphabet: the alphabet size of the node random variable\n        :param nName: name of node\n        :return: None\n        \"\"\"\n        self.nodes[nName] = [alphabet, set(), self.nCounter]\n        self.nodes_InsertOrder.append(nName)\n        self.nodes_indices[nName] = self.nCounter\n        self.nCounter += 1\n\n    def add_factor(self, nodeNeighbors, tensor):\n        \"\"\"\n        Adding a factor to the DEFG\n        :param nodeNeighbors: the factor's neighboring node in a dictionary {node_name: index in tensor, ...}\n        :param tensor: an n dimensional np.array\n        :return: None\n        \"\"\"\n        fName = 'f' + str(self.fCounter)\n        for n in nodeNeighbors.keys():\n            if n not in self.nodes.keys():\n                raise IndexError('Tried to add a factor with a non-existent 
node')\n if tensor.shape[nodeNeighbors[n]] != self.nodes[n][0]:\n raise IndexError('There is a mismatch between node alphabet and tensor index size')\n self.nodes[n][1].add(fName)\n self.factors[fName] = [nodeNeighbors, tensor, self.fCounter]\n self.fCounter += 1\n\n########################################################################################################################\n# #\n# DEFG BELIEF PROPAGATION ALGORITHM #\n# #\n########################################################################################################################\n\n def sumProduct(self,\n tmax,\n epsilon,\n dumping,\n initializeMessages=None,\n printTime=None,\n RDMconvergence=None):\n\n factors = self.factors\n nodes = self.nodes\n\n # initialize all messages\n if initializeMessages and self.messages_n2f and self.messages_f2n:\n node2factor = cp.deepcopy(self.messages_n2f)\n factor2node = cp.deepcopy(self.messages_f2n)\n else:\n node2factor = {}\n factor2node = {}\n for n in nodes.keys():\n node2factor[n] = {}\n alphabet = nodes[n][0]\n for f in nodes[n][1]:\n node2factor[n][f] = self.messageInit(alphabet)\n for f in factors.keys():\n factor2node[f] = {}\n for n in factors[f][0]:\n alphabet = nodes[n][0]\n factor2node[f][n] = self.messageInit(alphabet)\n # save messages\n self.messages_n2f = cp.deepcopy(node2factor)\n self.messages_f2n = cp.deepcopy(factor2node)\n\n # calculate two body RDMS in order to check the convergence of BP\n if RDMconvergence:\n self.calculateTwoBodyRDMS()\n\n for t in range(tmax):\n\n # save previous step messages\n preMessages_f2n = cp.deepcopy(factor2node)\n preMessages_n2f = cp.deepcopy(node2factor)\n\n # calculating factor to node (f -> n) messages\n for f in factors.keys():\n for n in factors[f][0].keys():\n factor2node[f][n] = dumping * preMessages_f2n[f][n] + (1. - dumping) * self.f2n_message(f, n, node2factor)\n factor2node[f][n] /= np.trace(factor2node[f][n])\n\n # calculating node to factor (n -> f) messages\n for n in nodes.keys():\n alphabet = nodes[n][0]\n for f in nodes[n][1]:\n neighbors = cp.deepcopy(nodes[n][1])\n neighbors.remove(f)\n tempMessage = np.ones((alphabet, alphabet), dtype=complex)\n for item in neighbors:\n tempMessage *= factor2node[item][n]\n\n node2factor[n][f] = dumping * preMessages_n2f[n][f] + (1. 
- dumping) * tempMessage\n node2factor[n][f] /= np.trace(node2factor[n][f])\n\n # save this step new messages\n self.messages_n2f = cp.deepcopy(node2factor)\n self.messages_f2n = cp.deepcopy(factor2node)\n\n # check convergence using RDMS or messages\n if RDMconvergence and t > 0:\n if self.checkBPconvergenceTwoBodyRDMS(epsilon):\n break\n else:\n if self.checkBPconvergence(preMessages_n2f, preMessages_f2n, epsilon):\n break\n if printTime and t < tmax - 1:\n print(\"BP converged in %d iterations \" % t)\n #else: print(\"BP didn't converged, num of total iterations was {}\".format(tmax))\n return\n\n def f2n_message(self, f, n, messages):\n neighbors, tensor, index = cp.deepcopy(self.factors[f])\n conjTensor = cp.copy(np.conj(tensor))\n l = cp.copy(len(tensor.shape))\n tensorIdx = list(range(l))\n for item in neighbors:\n if item == n:\n continue\n messageIdx = [self.factors[f][0][item], l + 1]\n tensorFinalIdx = cp.copy(tensorIdx)\n tensorFinalIdx[messageIdx[0]] = messageIdx[1]\n tensor = np.einsum(tensor, tensorIdx, messages[item][f], messageIdx, tensorFinalIdx)\n conjTensorIdx = cp.copy(tensorIdx)\n conjTensorIdx[self.factors[f][0][n]] = l + 1\n messageFinalIdx = [self.factors[f][0][n], l + 1]\n message = np.einsum(tensor, tensorIdx, conjTensor, conjTensorIdx, messageFinalIdx)\n message /= np.trace(message)\n return message\n\n def messageBroadcasting(self, message, idx, tensor):\n idx = [2 * idx, 2 * idx + 1]\n new_shape = np.ones(len(tensor.shape), dtype=np.int)\n new_shape[idx] = message.shape\n return np.reshape(message, new_shape)\n\n def messageVBroadcasting(self, message, idx, tensor):\n idx = [2 * (idx - 1), 2 * (idx - 1) + 1]\n new_shape = np.ones(len(tensor.shape), dtype=np.int)\n new_shape[idx] = message.shape\n return np.reshape(message, new_shape)\n\n def tensorBroadcasting(self, tensor, idx, sizedTensor):\n new_shape = np.ones(len(sizedTensor.shape), dtype=np.int)\n new_shape[idx] = tensor.shape\n return np.reshape(tensor, new_shape)\n\n def messageInit(self, alphabet):\n return np.ones((alphabet, alphabet), dtype=complex)\n #return np.eye(alphabet, dtype=complex)\n\n def generateSuperTensor(self, tensor):\n tensorIdx = list(range(len(tensor.shape)))\n conjtensorIdx = []\n superTensorIdx = []\n for i in tensorIdx:\n conjtensorIdx.append(i + len(tensorIdx))\n superTensorIdx.append(tensorIdx[i])\n superTensorIdx.append(conjtensorIdx[i])\n superTensor = np.einsum(tensor, tensorIdx, np.conj(tensor), conjtensorIdx, superTensorIdx)\n index = list(range(len(superTensor.shape)))\n index[1] = index[0]\n superTensor = np.einsum(superTensor, index)\n return superTensor\n\n def generateSuperPhysicalTensor(self, tensor):\n tensorIdx = list(range(len(tensor.shape)))\n conjtensorIdx = []\n superTensorIdx = []\n for i in tensorIdx:\n conjtensorIdx.append(i + len(tensorIdx))\n superTensorIdx.append(tensorIdx[i])\n superTensorIdx.append(conjtensorIdx[i])\n superTensor = np.einsum(tensor, tensorIdx, np.conj(tensor), conjtensorIdx, superTensorIdx)\n return superTensor\n\n def checkBPconvergence(self, pre_n2f, pre_f2n, epsilon):\n convergenceCounter = 0\n messagesCounter = 0\n n2f_new, f2n_new = self.messages_n2f, self.messages_f2n\n for n in pre_n2f:\n for f in pre_n2f[n]:\n messagesCounter += 1\n if np.sum(np.abs(pre_n2f[n][f] - n2f_new[n][f])) < epsilon:\n convergenceCounter += 1\n for f in pre_f2n:\n for n in pre_f2n[f]:\n messagesCounter += 1\n if np.sum(np.abs(pre_f2n[f][n] - f2n_new[f][n])) < epsilon:\n convergenceCounter += 1\n if convergenceCounter == messagesCounter:\n return 1\n 
else:\n return 0\n\n########################################################################################################################\n# #\n# DEFG AUXILIARY FUNCTIONS #\n# #\n########################################################################################################################\n\n def calculateNodesBeliefs(self):\n self.nodesBeliefs = {}\n nodes = self.nodes\n messages = self.messages_f2n\n keys = nodes.keys()\n for n in keys:\n alphabet = nodes[n][0]\n tempMessage = np.ones((alphabet, alphabet), dtype=complex)\n for f in nodes[n][1]:\n tempMessage *= messages[f][n]\n self.nodesBeliefs[n] = tempMessage / np.trace(tempMessage)\n\n def calculateFactorsBeliefs(self):\n self.factorsBeliefs = {}\n factors = self.factors\n messages = self.messages_n2f\n keys = factors.keys()\n for f in keys:\n superTensor = self.generateSuperPhysicalTensor(cp.deepcopy(factors[f][1]))\n neighbors = factors[f][0]\n for n in neighbors.keys():\n superTensor *= self.messageBroadcasting(messages[n][f], neighbors[n], superTensor)\n index = list(range(len(superTensor.shape)))\n index[1] = index[0]\n superTensor = np.einsum(superTensor, index)\n self.factorsBeliefs[f] = superTensor\n\n def calculatePhysicalFactorsBeliefs(self):\n self.factorsBeliefs = {}\n factors = self.factors\n messages = self.messages_n2f\n keys = factors.keys()\n for f in keys:\n superTensor = self.generateSuperPhysicalTensor(cp.deepcopy(factors[f][1]))\n neighbors = factors[f][0]\n for n in neighbors.keys():\n superTensor *= self.messageBroadcasting(messages[n][f], neighbors[n], superTensor)\n self.factorsBeliefs[f] = superTensor\n\n def calculateRDMS_dotProduct(self):\n self.rdms_dotProduct = []\n messages = self.messages_n2f\n for n in range(self.fCounter):\n f = 'f' + str(n)\n neighbors, tensor, index = cp.deepcopy(self.factors[f])\n conjTensor = cp.copy(np.conj(tensor))\n l = len(tensor.shape)\n tensorIdx = list(range(l))\n for node in neighbors:\n messageIdx = [self.factors[f][0][node], l + 1]\n tensorFinalIdx = cp.copy(tensorIdx)\n tensorFinalIdx[messageIdx[0]] = messageIdx[1]\n tensor = np.einsum(tensor, tensorIdx, messages[node][f], messageIdx, tensorFinalIdx)\n conjTensorIdx = cp.copy(tensorIdx)\n conjTensorIdx[0] = l + 1\n messageFinalIdx = [0, l + 1]\n belief = np.einsum(tensor, tensorIdx, conjTensor, conjTensorIdx, messageFinalIdx)\n belief /= np.trace(belief)\n self.rdms_dotProduct.append(belief)\n\n def calculateTwoBodyRDMS(self):\n self.twoBodyRDMS = []\n messages = self.messages_n2f\n for n in range(self.nCounter):\n node = 'n' + str(n)\n # in DEFG every node has only two factor neighbors\n alphabet, fNeighbors, nIndex = cp.deepcopy(self.nodes[node])\n f0, f1 = fNeighbors\n\n # collect the node factor neighbors\n nNeighbors0, factor0, fIndex0 = cp.deepcopy(self.factors[f0])\n nNeighbors1, factor1, fIndex1 = cp.deepcopy(self.factors[f1])\n\n # absorb messages in single edge factors\n for n0 in nNeighbors0:\n if n0 == node:\n continue\n else:\n factor_idx = list(range(len(factor0.shape)))\n message_idx = [int(nNeighbors0[n0]), len(factor0.shape)]\n final_idx = list(range(len(factor0.shape)))\n final_idx[nNeighbors0[n0]] = len(factor0.shape)\n factor0 = np.einsum(factor0, factor_idx, messages[n0][f0], message_idx, final_idx)\n\n for n1 in nNeighbors1:\n if n1 == node:\n continue\n else:\n factor_idx = list(range(len(factor1.shape)))\n message_idx = [int(nNeighbors1[n1]), len(factor1.shape)]\n final_idx = list(range(len(factor1.shape)))\n final_idx[nNeighbors1[n1]] = len(factor1.shape)\n factor1 = 
np.einsum(factor1, factor_idx, messages[n1][f1], message_idx, final_idx)\n\n # collect the two conjugated factors\n _, factor0star, _ = cp.deepcopy(self.factors[f0])\n _, factor1star, _ = cp.deepcopy(self.factors[f1])\n factor0star = np.conj(factor0star)\n factor1star = np.conj(factor1star)\n\n # prepare for ncon function\n idx_f0 = list(range(len(factor0.shape)))\n idx_f1 = list(range(len(factor0.shape), len(factor0.shape) + len(factor1.shape)))\n idx_f0s = list(range(len(factor0.shape)))\n idx_f1s = list(range(len(factor0.shape), len(factor0.shape) + len(factor1.shape)))\n idx_f0[0] = -1 # i\n idx_f0s[0] = -2 # i'\n idx_f1[0] = -3 # j\n idx_f1s[0] = -4 # j'\n idx_f0[nNeighbors0[node]] = 1000\n idx_f1[nNeighbors1[node]] = 1000\n idx_f0s[nNeighbors0[node]] = 1001\n idx_f1s[nNeighbors1[node]] = 1001\n\n # use ncon to make the calculation\n rdm = ncon.ncon([factor0, factor0star, factor1, factor1star],\n [idx_f0, idx_f0s, idx_f1, idx_f1s])\n rdm = np.reshape(rdm, (rdm.shape[0] * rdm.shape[1], rdm.shape[2] * rdm.shape[3])) # rho_{i * i', j * j'}\n rdm /= np.trace(rdm)\n self.twoBodyRDMS.append(rdm)\n\n def checkBPconvergenceTwoBodyRDMS(self, epsilon):\n previousRDMS = cp.deepcopy(self.twoBodyRDMS)\n self.calculateTwoBodyRDMS()\n newRDMS = self.twoBodyRDMS\n averagedTraceDistance = 0\n for n in range(self.nCounter):\n averagedTraceDistance += self.traceDistance(previousRDMS[n], newRDMS[n])\n averagedTraceDistance /= self.nCounter\n if averagedTraceDistance <= epsilon:\n return 1\n else: return 0\n\n def traceDistance(self, a, b):\n # returns the trace distance between the two density matrices a & b\n # d = 0.5 * norm(a - b)\n eigenvalues = np.linalg.eigvals(a - b)\n d = 0.5 * np.sum(np.abs(eigenvalues))\n return d\n\n def calculateRDMS_broadcasting(self):\n self.rdms_broadcasting = {}\n factors = self.factors\n messages = self.messages_n2f\n keys = factors.keys()\n for f in keys:\n super_tensor = self.generateSuperPhysicalTensor(cp.deepcopy(factors[f][1]))\n neighbors = factors[f][0]\n for n in neighbors.keys():\n super_tensor *= self.messageBroadcasting(messages[n][f], neighbors[n], super_tensor)\n idx = list(range(len(super_tensor.shape)))\n self.rdms_broadcasting[factors[f][2]] = np.einsum(super_tensor, idx, [0, 1])\n self.rdms_broadcasting[factors[f][2]] /= np.trace(self.rdms_broadcasting[factors[f][2]])\n\n def calculateRDMSfromFactorBeliefs(self):\n rdms = {}\n for i in range(self.fCounter):\n rdms[i] = np.einsum(self.factorsBeliefs['f' + str(i)],\n list(range(len(self.factorsBeliefs['f' + str(i)].shape))), [0, 1])\n rdms[i] /= np.trace(rdms[i])\n return rdms\n\n def twoFactorsBelief(self, f1, f2):\n \"\"\"\n Given names of two factors returns the two factors with the n2f messages absorbed over all edges except the\n common one.\n :param f1: name of factor1, i.e. 'f1'\n :param f2: name of factor2, i.e. 
'f24'\n :return: two double-edge factors, where the node 2 factor messages are absorbed over all edges except the common\n edge btween factor1 and factor2.\n \"\"\"\n ne1, ten1, idx1 = cp.deepcopy(self.factors[f1])\n ne2, ten2, idx2 = cp.deepcopy(self.factors[f2])\n del_n = []\n for n in ne1:\n if n in ne2:\n del_n.append(n)\n\n # delete common edge\n for n in del_n:\n del ne1[n]\n del ne2[n]\n messages = self.messages_n2f\n super_tensor1 = self.generateSuperPhysicalTensor(ten1)\n super_tensor2 = self.generateSuperPhysicalTensor(ten2)\n\n # absorb n -> f messages\n for n in ne1.keys():\n super_tensor1 *= self.messageBroadcasting(messages[n][f1], ne1[n], super_tensor1)\n for n in ne2.keys():\n super_tensor2 *= self.messageBroadcasting(messages[n][f2], ne2[n], super_tensor2)\n return super_tensor1, super_tensor2\n\n########################################################################################################################\n# #\n# DEFG & BPU AUXILIARY FUNCTIONS #\n# #\n########################################################################################################################\n\n def f2n_message_BPtruncation(self, f, n, messages, newFactor):\n neighbors, tensor, index = cp.deepcopy(self.factors[f])\n tensor = newFactor\n conj_tensor = cp.copy(np.conj(tensor))\n l = cp.copy(len(tensor.shape))\n tensor_idx = list(range(l))\n for item in neighbors:\n if item == n:\n continue\n message_idx = [self.factors[f][0][item], l + 1]\n final_idx = cp.copy(tensor_idx)\n final_idx[message_idx[0]] = message_idx[1]\n tensor = np.einsum(tensor, tensor_idx, messages[item][f], message_idx, final_idx)\n conj_tensor_idx = cp.copy(tensor_idx)\n conj_tensor_idx[self.factors[f][0][n]] = l + 1\n message_final_idx = [self.factors[f][0][n], l + 1]\n message = np.einsum(tensor, tensor_idx, conj_tensor, conj_tensor_idx, message_final_idx)\n message /= np.trace(message)\n return message\n\n # needs checking\n def absorb_message_into_factor_in_env(self, f, nodes_out):\n # return a copy of f super physical tensor with absorbed message from node n\n ne, ten, idx = cp.deepcopy(self.factors[f])\n messages = self.messages_n2f\n super_tensor = self.generateSuperPhysicalTensor(ten)\n for n in ne:\n if n in nodes_out:\n super_tensor *= self.messageBroadcasting(messages[n][f], ne[n], super_tensor)\n return super_tensor\n\n # needs checking\n def absorb_message_into_factor_in_env_efficient(self, f, nodes_out):\n # return a copy of f tensor with absorbed message from node n\n ne, ten, idx = cp.deepcopy(self.factors[f])\n messages = self.messages_n2f\n for n in ne:\n if n in nodes_out:\n idx = list(range(len(ten.shape)))\n final_idx = list(range(len(ten.shape)))\n final_idx[ne[n]] = len(ten.shape)\n ten = np.einsum(ten, idx, messages[n][f], [len(ten.shape), ne[n]], final_idx)\n return ten\n\n########################################################################################################################\n# #\n# DEFG EXACT CALCULATIONS #\n# #\n########################################################################################################################\n\n # needs checking\n def exact_joint_probability(self):\n factors = cp.deepcopy(self.factors)\n p_dim = []\n p_order = []\n p_dic = {}\n counter = 0\n for i in range(self.nCounter):\n p_dic[self.nodes_InsertOrder[i]] = counter\n p_dic[self.nodes_InsertOrder[i] + '*'] = counter + 1\n p_order.append(self.nodes_InsertOrder[i])\n p_order.append(self.nodes_InsertOrder[i] + '*')\n p_dim.append(self.nodes[self.nodes_InsertOrder[i]][0])\n 
p_dim.append(self.nodes[self.nodes_InsertOrder[i]][0])\n counter += 2\n p = np.ones(p_dim, dtype=complex)\n for item in factors.keys():\n f = self.generateSuperTensor(factors[item][1])\n broadcasting_idx = [0] * len(f.shape)\n for object in factors[item][0]:\n broadcasting_idx[2 * (factors[item][0][object] - 1)] = p_dic[object]\n broadcasting_idx[2 * (factors[item][0][object] - 1) + 1] = p_dic[object + '*']\n permute_tensor_indices = np.argsort(broadcasting_idx)\n f = np.transpose(f, permute_tensor_indices)\n broadcasting_idx = np.sort(broadcasting_idx)\n p *= self.tensorBroadcasting(f, broadcasting_idx, p)\n return p, p_dic, p_order\n\n # needs checking\n def exact_nodes_marginal(self, p, p_dic, p_order, nodes_list):\n marginal = cp.deepcopy(p)\n final_idx = [0] * len(nodes_list)\n for i in range(len(nodes_list)):\n final_idx[i] = p_dic[nodes_list[i]]\n marginal = np.einsum(marginal, list(range(len(marginal.shape))), final_idx)\n return marginal\n\n\n\n\n\n\n\n\n\n","sub_path":"DoubleEdgeFactorGraphs.py","file_name":"DoubleEdgeFactorGraphs.py","file_ext":"py","file_size_in_byte":23706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"54473277","text":"# Finding missing value\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\navocados = pd.read_csv('avocados_sales.csv')\navocados_2016 = avocados[avocados['year'] == '2016']\n# Check individual values for missing values\nprint(avocados_2016.isna())\n\n# Check each column for missing values\nprint(avocados_2016.isna().any())\n\n# Bar plot of missing values by variable\navocados_2016.isna().sum().plot(kind='bar')\n\n# Show plot\nplt.show()\n\n# Removing missing values\navocados_complete = avocados_2016.dropna()\n\n# Check if any columns contain missing values\nprint(avocados_complete.isna().any())\n\n# List the columns with missing values\ncols_with_missing = [\"small_sold\", \"large_sold\", \"xl_sold\"]\n\n# Create histograms showing the distributions cols_with_missing\navocados_2016[cols_with_missing].hist()\nplt.show()\n\n# Fill in missing values with 0\navocados_filled = avocados_2016.fillna(0)\n\n# Create histograms of the filled columns\navocados_filled[cols_with_missing].hist()\nplt.show()","sub_path":"Data Manipulation with Pandas/missing_value.py","file_name":"missing_value.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"190353711","text":"import datetime\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.db.models.signals import post_save\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.sites.models import Site\n\nfrom django.core.validators import RegexValidator\nfrom django.core.mail import send_mail\nfrom django.utils.text import slugify\nfrom django.conf import settings\nfrom django.db import models\n\n\nfrom phonenumber_field.modelfields import PhoneNumberField\nfrom categories.utils import unique_slug_generator\n\n\nUser = settings.AUTH_USER_MODEL\n\n\n# ALPHANUMERIC VALIDATOR FOR FIELDS\nalphanumeric = RegexValidator(r'^[A-Za-z0-9\\s]*$', 'Solo caracteres a-z, A-Z, 0-9.')\n\n# SAVE PATHS FOR UPLOADS\ndef site_logo_directory_path(instance, filename):\n # file will be uploaded to MEDIA_ROOT/site_site-name/\n site_folder = slugify(instance.site_name)\n return 'site-{0}/logos/{1}'.format(site_folder, filename)\n\ndef site_about_directory_path(instance, filename):\n # file will be uploaded to MEDIA_ROOT/site_site-name/\n site_folder = 
slugify(instance.site_name)\n return 'site-{0}/about/{1}'.format(site_folder, filename)\n\nclass Template(models.Model):\n display_name = models.CharField(_('Display name'), max_length=40, blank=True)\n template_file = models.CharField(_('Template file'), max_length=100, blank=True)\n\n def __str__(self):\n return self.display_name\n\nclass BasicSite(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE,)\n site_name = models.CharField(_(\"Nombre del sitio\"), max_length=100, blank=False, help_text='Evita utilizar caracteres especiales & $ % * -_ . , : ; \\\" + =', validators=[alphanumeric])\n template = models.ForeignKey(Template, on_delete=models.CASCADE,)\n site_has_domain = models.BooleanField(_('Tiene dominio'), default=False)\n site = models.ForeignKey(Site, on_delete=models.CASCADE)\n site_title_tag = models.CharField(_(\"Title Tag para el sitio\"), max_length=100, blank=True)\n site_description_tag = models.TextField(_(\"Description Tag para el sitio\"), max_length=300, blank=True, null=True, help_text='meta tag description')\n site_keywords_tag = models.CharField(_(\"keywords tag para el sitio\"), max_length=255, blank=True, help_text='meta tag keywords')\n city = models.CharField(_(\"City\"), max_length=100, blank=True, default='San Salvador')\n phone = PhoneNumberField(blank=True)\n favicon = models.ImageField(upload_to=site_logo_directory_path, blank=True)\n logo = models.ImageField(upload_to=site_logo_directory_path, blank=True)\n square_logo = models.ImageField(upload_to=site_logo_directory_path, blank=True)# file for OpenGraph metadata\n about_section_name = models.CharField(_(\"Nombre del la sección acerca de\"), max_length=100, blank=True)\n about_image = models.ImageField(upload_to=site_about_directory_path, blank=True)\n about_description = models.TextField(_(\"Descripción Acerca De\"), max_length=550, blank=True, null=True, help_text='Descripción general')\n about_background = models.ImageField(upload_to=site_about_directory_path, blank=True)\n service_section_name = models.CharField(_(\"Nombre del la sección de servicios\"), max_length=100, blank=True)\n service_background = models.ImageField(upload_to=site_logo_directory_path, blank=True)\n benefit_section_name = models.CharField(_(\"Nombre del la sección de beneficios\"), max_length=100, blank=True)\n benefit_background = models.ImageField(upload_to=site_logo_directory_path, blank=True)\n contact_background = models.ImageField(upload_to=site_logo_directory_path, blank=True)\n last_updated = models.DateTimeField(auto_now_add=True)\n views = models.PositiveIntegerField(default=0, blank=True)\n address = models.TextField(_(\"Dirección de local\"), max_length=255, blank=True, null=True, help_text='dirección')\n slug = models.SlugField(_(\"Site URL\"), null=True, blank=True, help_text='URL personalizada para tu sitio.')\n\n\n def __str__(self):\n return self.site_name\n\n def get_absolute_url(self):\n return reverse('basic_sites:home', kwargs={'slug': self.slug})\n\n # def get_site_in_sites(self):\n # return \",\\n\".join([u.name for u in self.sites_by_category.all()])\n\n # def save(self, *args, **kwargs):\n # if not self.slug:\n # self.slug = unique_slug_generator(self, self.user.get_full_name())\n # super().save()\n\n\nclass Benefit(models.Model):\n site_benefits = models.ForeignKey(BasicSite, on_delete=models.CASCADE)\n name = models.CharField(_('Nombre del beneficio'), max_length=100, blank=True)\n description = models.TextField(_(\"Description\"), max_length=255, blank=True, null=True, 
help_text='Pequeña introducción de ti') \n\n class Meta:\n ordering = ['pk']\n\n def __str__(self):\n return self.name\n\n\nclass Schedule(models.Model):\n DAYS = (\n ('1', ('Lunes')),\n ('2', ('Martes')),\n ('3', ('Miércoles')),\n ('4', ('jueves')),\n ('5', ('Viernes')),\n ('6', ('Sábado')),\n ('7', ('Domingo')),\n )\n site_schedule = models.ForeignKey(BasicSite, on_delete=models.CASCADE,)\n weekday = models.CharField(max_length=25, choices=DAYS, blank=True)\n opening_hour = models.TimeField(blank=True)\n closing_hour = models.TimeField(blank=True)\n \n class Meta:\n ordering = ('weekday', 'opening_hour')\n unique_together = ('weekday', 'opening_hour', 'closing_hour')\n\n def __str__(self):\n return '{}: {} - {}'.format(self.get_weekday_display(), self.opening_hour, self.closing_hour)\n\n\ndef site_icons_directory_path(instance, filename):\n # file will be uploaded to MEDIA_ROOT/user_/\n site_folder = slugify(instance.site_name)\n return 'site-{0}/icons/{1}'.format(site_folder, filename)\n\nclass Service(models.Model):\n site_name = models.ForeignKey(BasicSite, on_delete=models.CASCADE,)\n name = models.CharField(_('Servicio'),max_length=100, blank=False)\n icon = models.ImageField(upload_to=site_icons_directory_path, blank=True)\n description = models.TextField(_(\"Descripción del servicio\"), max_length=850, blank=True, null=True, help_text='Pequeña descripción del servicio')\n\n def __str__(self):\n return self.name\n\n\nclass SocialNetwork(models.Model):\n site_sn = models.ForeignKey(BasicSite, on_delete=models.CASCADE,)\n social_network = models.CharField(_('Nombre de la Red Social'), max_length=100, blank=True)\n sn_link = models.URLField(_('URL'), blank=True)\n\n def __str__(self):\n return self.social_network\n\n\ndef site_sliders_directory_path(instance, filename):\n # file will be uploaded to MEDIA_ROOT/user_/\n site_folder = slugify(instance.site_name)\n return 'site-{0}/sliders/{1}'.format(site_folder, filename)\n\nclass Slider(models.Model):\n site_name = models.ForeignKey(BasicSite, on_delete=models.CASCADE,)\n slider_image = models.ImageField(upload_to=site_sliders_directory_path, blank=True)\n slider_title = models.CharField(_('Título de la imagen'), max_length=100, blank=True)\n slider_description = models.CharField(_('Descripción de la imagen'), max_length=200, blank=True)\n slider_link = models.URLField(_('Slider URL'), blank=True)\n\n def __str__(self):\n return self.slider_title\n\n\ndef site_gallery_directory_path(instance, filename):\n # file will be uploaded to MEDIA_ROOT/user_/\n site_folder = slugify(instance.site_name)\n return 'site-{0}/gallery/{1}'.format(site_folder, filename)\n\n\nclass Gallery(models.Model):\n site_name = models.ForeignKey(BasicSite, on_delete=models.CASCADE,)\n gallery_img_name = models.CharField(_('Nombre de la imagen'), max_length=100, blank=True)\n gallery_img = models.ImageField(upload_to=site_gallery_directory_path, blank=True)\n gallery_img_description = models.CharField(_('Descripción de la imagen'), max_length=100, blank=True)\n gallery_img_link = models.URLField(_('URL de la imagen'), blank=True)\n\n def __str__(self):\n return self.gallery_img_name\n\n\n# class PlanOne(models.Model):\n# site_name = models.ForeignKey(BasicSite, on_delete=models.CASCADE,)\n# plan_name = models.CharField(_('Nombre del plan'),max_length=100, blank=False)\n# price = models.CharField(_('Precio del plan'),max_length=100, blank=False)\n# benefit_one = models.CharField(_('Precio del plan'),max_length=100, blank=False)\n# benefit_two = 
models.CharField(_('Precio del plan'),max_length=100, blank=False)\n#     benefit_three = models.CharField(_('Precio del plan'),max_length=100, blank=False)\n#     benefit_four = models.CharField(_('Precio del plan'),max_length=100, blank=False)\n#     benefit_five = models.CharField(_('Precio del plan'),max_length=100, blank=False)\n#     benefit_six = models.CharField(_('Precio del plan'),max_length=100, blank=False)\n#     benefit_seven = models.CharField(_('Precio del plan'),max_length=100, blank=False)\n#     benefit_eight = models.CharField(_('Precio del plan'),max_length=100, blank=False)\n#     benefit_nine = models.CharField(_('Precio del plan'),max_length=100, blank=False)\n#     benefit_ten = models.CharField(_('Precio del plan'),max_length=100, blank=False)\n\n\n# # # SIGNAL\n# # def create_site(sender, **kwargs):\n# #     if kwargs['created']:\n# #         site = BasicSite.objects.create(user=kwargs['instance'])\n\n# # post_save.connect(create_site, sender=User)\n","sub_path":"basic_sites/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"133601510","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 28 13:12:37 2018\r\n\r\n@author: pc1\r\n\"\"\"\r\n\r\nimport requests\r\nimport time\r\nfrom bs4 import BeautifulSoup\r\n\r\nurl = \"http://hz.58.com/ruanjiangong/pn{}\"\r\nheaders = {\r\n    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\r\n    }\r\n\r\ndef spider():\r\n    for i in range(8):\r\n        req = requests.get(url.format(str(i + 1)),headers=headers)\r\n        req.encoding = \"utf-8\"  # use the page's own encoding\r\n        soup = BeautifulSoup(req.text, \"lxml\")\r\n        items = soup.select(\"li.job_item\")\r\n        for item in items:\r\n            address = item.select(\"div.item_con span.address\")[0].text  # select() returns a list\r\n            name = item.select(\"div.item_con span.name\")[0].text\r\n            salary = item.select(\"div.item_con p.job_salary\")[0].text\r\n            welfare = \"\"  # default so the print below works when job_wel is absent\r\n            if len(item.select(\"div.item_con div.job_wel\")) > 0:\r\n                welfare = item.select(\"div.item_con div.job_wel\")[0].text\r\n            company = item.select(\"div.item_con div.comp_name a.fl\")[0].text\r\n            href = item.select(\"div.item_con div.comp_name a.fl\")[0].get(\"href\")\r\n            print(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\"%(address, name, salary, company,welfare,href))\r\n        time.sleep(2)\r\n    \r\nif __name__ == '__main__':\r\n    spider()\r\n\r\n","sub_path":"58同城爬虫.py","file_name":"58同城爬虫.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"314940064","text":"# -*- coding: utf-8 -*-\n\nimport ast\n\nfrom wemake_python_styleguide.errors.general import (\n    RaiseNotImplementedViolation,\n    WrongKeywordViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass WrongRaiseVisitor(BaseNodeVisitor):\n    \"\"\"This class finds wrong `raise` keywords.\"\"\"\n\n    def _check_exception_type(self, node: ast.Raise) -> None:\n        exception = getattr(node, 'exc', None)\n        if exception is None:\n            return\n\n        exception_func = getattr(exception, 'func', None)\n        if exception_func:\n            exception = exception_func\n\n        exception_name = getattr(exception, 'id', None)\n        if exception_name == 'NotImplemented':\n            self.add_error(\n                RaiseNotImplementedViolation(node, text=exception_name),\n            )\n\n    def visit_Raise(self, node: ast.Raise) -> None:\n        \"\"\"\n        Checks how `raise` keyword is used.\n\n        Raises:\n            RaiseNotImplementedViolation\n\n        \"\"\"\n        
self._check_exception_type(node)\n self.generic_visit(node)\n\n\nclass WrongKeywordVisitor(BaseNodeVisitor):\n \"\"\"This class is responsible for finding wrong keywords.\"\"\"\n\n def visit_Global(self, node: ast.Global) -> None:\n \"\"\"\n Used to find `global` keyword.\n\n Raises:\n WrongKeywordViolation\n\n \"\"\"\n self.add_error(WrongKeywordViolation(node))\n self.generic_visit(node)\n\n def visit_Nonlocal(self, node: ast.Nonlocal) -> None:\n \"\"\"\n Used to find `nonlocal` keyword.\n\n Raises:\n WrongKeywordViolation\n\n \"\"\"\n self.add_error(WrongKeywordViolation(node))\n self.generic_visit(node)\n\n def visit_Delete(self, node: ast.Delete) -> None:\n \"\"\"\n Used to find `del` keyword.\n\n Raises:\n WrongKeywordViolation\n\n \"\"\"\n self.add_error(WrongKeywordViolation(node, text='del'))\n self.generic_visit(node)\n\n def visit_Pass(self, node: ast.Pass) -> None:\n \"\"\"\n Used to find `pass` keyword.\n\n Raises:\n WrongKeywordViolation\n\n \"\"\"\n self.add_error(WrongKeywordViolation(node))\n self.generic_visit(node)\n","sub_path":"wemake_python_styleguide/visitors/ast/wrong_keyword.py","file_name":"wrong_keyword.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"209728330","text":"import logging\nimport os\n\nfrom adventureIO.adventure_bot import AdventureBot\nfrom adventureIO.constants import Bot as BotConfig, IDS\n\n\nOWNERS = (*IDS.creators, IDS.benny)\n\nlog = logging.getLogger(__name__)\nbot = AdventureBot(command_prefix=BotConfig.prefix)\n\n\n@bot.group(name=\"git\")\nasync def git_group(ctx):\n \"\"\"Group for git commands\"\"\"\n\n\n@git_group.command(name=\"pull\")\nasync def git_pull_command(ctx):\n \"\"\"Pulls any updates for the bot from git\"\"\"\n try:\n os.system(\"git pull origin master > gitoutput.txt\")\n with open(\"gitoutput.txt\") as f:\n await ctx.send(f.read())\n os.remove(\"gitoutput.txt\")\n except Exception as e:\n await ctx.send(f\"```{e}```\")\n\n\n@bot.group(name=\"bot\")\nasync def bot_group(ctx):\n \"\"\"Group for bot commands\"\"\"\n\n\n@bot_group.command(name=\"shutdown\", aliases=(\"exit\", \"kill\"))\nasync def bot_shutdown_command(ctx):\n \"\"\"Logs the bot out, this kills the process\"\"\"\n \n if ctx.author.id not in OWNERS:\n return\n await bot.logout()\n\n\n@bot.command()\nasync def ping(ctx):\n \"\"\"Pong\"\"\"\n\n await ctx.send(\"Pong <@234048816033038337>\")\n\n\n@bot_group.command()\nasync def reload_cogs(ctx):\n \"\"\"\n Utility command to reload every cog the bot has loaded.\n\n This recreates all the class instances of the cogs,\n which in turn call the __init__ again for all of them.\n \"\"\"\n\n if ctx.author.id not in OWNERS:\n return\n temp = []\n errors = []\n\n for x in bot.extensions:\n temp.append(x)\n try:\n bot.unload_extension(x)\n bot.load_extension(x)\n except Exception as e:\n errors.append(str(e))\n\n await ctx.send(\"Done!\")\n if errors:\n error = \"\\n\".join(errors)\n await ctx.send(f\"```{error}```\")\n\n\n@bot_group.command(name=\"reload\")\nasync def reload_cog(ctx, *, cog):\n \"\"\"\n Utility command to unload and load a cog.\n\n This recreates the class instance and reloads the __init__ call\n :param cog: Name of the cog to be reloaded.\n \"\"\"\n\n if not cog.count(\".\") == 2:\n if cog.startswith(\"cogs.\"):\n cog = f\"adventureIO.{cog}\"\n else:\n cog = f\"adventureIO.cogs.{cog}\"\n\n try:\n bot.unload_extension(cog)\n bot.load_extension(cog)\n except Exception as e:\n return await 
ctx.send(f\"```{e}```\")\n\n await ctx.send(\"Done\")\n\n\nbot.run(BotConfig.token)\n","sub_path":"adventureIO/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"242172845","text":"\nfrom .wave_pack import convolution2\nfrom .utills import counter\nfrom .atomic_states import np\n\n\ndef get_psi_temporal(psi0: object, time, nof, f_max, solver='3'):\n noa = len(psi0.zpos)\n if solver=='4':\n from .dyson_solvers import solver4_step as solver\n dim = noa + 2*noa*(noa-1)\n elif solver=='3':\n from .dyson_solvers import solver3_step as solver\n dim = noa\n elif solver=='2':\n pass\n elif solver=='1':\n pass\n else:\n raise ValueError(\"Solver hasn't been found\")\n\n freq = np.linspace(-f_max, f_max, nof)\n decay = np.zeros((dim, nof), dtype=np.complex)\n for i, om in enumerate(freq):\n decay[:, i] = solver(psi0, om)\n counter(i, nof)\n\n decay_temp = np.zeros((dim, len(time)), dtype=np.complex)\n\n for i in range(dim):\n decay_temp[i, :] = convolution2(time, freq, decay[i, :], np.ones_like(freq))\n counter(i,dim)\n\n return time, decay_temp\n\n\ndef get_temporal_metrics(setup: object, time2, nof, f_max, solver='3'):\n t, setup_td = get_psi_temporal(setup, time2, nof, f_max, solver)\n noa = len(setup.zpos)\n if solver=='4':\n instate = np.pad(setup.campl, (0, 2 * noa * (noa - 1)), 'constant', constant_values=(0, 0))\n elif solver == '3':\n instate = setup.campl\n setup_square = np.real(np.dot(np.conj(np.transpose(setup_td)), setup_td).diagonal())\n setup_init = abs(np.dot(np.conj(np.transpose(setup_td)), instate)) ** 2\n proj = np.diag(abs(instate))\n setup_p = np.dot(proj, setup_td)\n setup_inplace = np.real(np.dot(np.conj(np.transpose(setup_p)), setup_p).diagonal())\n return setup_square, setup_init, setup_inplace # p, p0, pa\n\n\ndef get_pulses(setup: object, time2, nof, f_max, solver='3'):\n t, setup_td = get_psi_temporal(setup, time2, nof, f_max, solver)\n noa = len(setup.zpos)\n if solver=='3':\n leftstate = np.exp(-2j*np.pi* setup.zpos)\n rightstate = np.exp(2j*np.pi* setup.zpos)\n\n leftpulse = abs(np.dot(np.conj(np.transpose(setup_td)), leftstate)) ** 2\n rightpulse = abs(np.dot(np.conj(np.transpose(setup_td)), rightstate)) ** 2\n else:\n raise ValueError(\"Solver hasn't been realized yet\")\n denominator = (leftpulse.max() + rightpulse.max())/2\n return leftpulse/denominator, rightpulse/denominator\n","sub_path":"novelfss/temporal_conversion.py","file_name":"temporal_conversion.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"133547027","text":"import turtle as t\nimport math\n\n\ns = t.getscreen()\n\nbg = t.bgcolor(\"#ffffff\")\ncolor = t.color(\"#000000\")\nfill = t.fillcolor(\"#000000\")\n\ncolors = {\n\t\"Rot\": \"#fc0303\",\n\t\"Grün\": \"#24fc03\",\n\t\"Blau\": \"#0331fc\",\n\t\"Türkis\": \"#03e7fc\",\n\t\"Gelb\": \"#f0fc03\",\n\t\"Lila\": \"#f803fc\",\n\t\"Orange\": \"#fca503\",\n\t\"Pink\": \"#fc03b5\",\n\t\"Rosa\": \"#fc03b5\",\n\t\"Schwarz\": \"#000000\",\n\t\"Weiß\": \"#ffffff\"\n}\n\n#----------------------------------------------------------------------------------\n\ndef farbe (stift, hintergrund, fill):\n\tt.bgcolor(hintergrund)\n\tt.color(stift)\n\tt.fillcolor(fill)\n\ndef quad(size):\n\tt.penup()\n\tt.bk(size/2)\n\tt.rt(90)\n\tt.fd(size/2)\n\tt.lt(90)\n\tt.pendown()\n\tposition = t.pos()\n\tt.begin_fill()\n\tfor i in 
range(3):\n\t\tt.fd(size)\n\t\tt.lt(90)\n\tt.goto(position)\n\tt.end_fill()\n\n#----------------------------------------------------------------------------------\n\ndef dreieck (seite1, seite2, winkel1):\n\tt.begin_fill()\n\tposition = t.pos()\n\tt.fd(seite1)\n\tt.lt(180 - winkel1)\n\tt.fd(seite2)\n\tt.goto(position)\n\tt.end_fill()\n#----------------------------------------------------------------------------------\n\ndef gleichseitigesdreieck(size):\n\tt.begin_fill()\n\tt.penup()\n\tt.bk(size/2)\n\tt.rt(90)\n\tt.fd(size/2)\n\tt.lt(90)\n\tt.pendown()\n\tfor i in range(2):\n\t\tt.fd(size)\n\t\tt.lt(120)\n\tt.fd(size)\n\tt.end_fill()\n\n\n#----------------------------------------------------------------------------------\n\ndef kreis(radius, abschnitt):\n\tt.begin_fill()\n\tt.penup()\n\tt.right(90)\n\tt.forward(radius)\n\tt.left(90)\n\tt.pendown()\n\tt.circle(radius, abschnitt)\n\tt.end_fill()\n#----------------------------------------------------------------------------------\ndef rechteck(size1, size2):\n t.penup()\n t.bk(size1/2)\n t.rt(90)\n t.fd(size2/2)\n t.lt(90)\n t.pendown()\n t.begin_fill()\n t.fd(size1)\n t.lt(90)\n t.fd(size2)\n t.lt(90)\n t.fd(size1)\n position=t.pos()\n t.lt(90)\n t.fd(size2)\n t.end_fill()\n return position\n#----------------------------------------------------------------------------------\ndef stern(size):\n t.rt(108)\n t.begin_fill()\n for i in range(5):\n\t t.fd(size)\n\t t.rt(36)\n\t t.fd(size)\n\t t.rt(252)\n t.end_fill()\n\n #---------------------------------------------------------------------------------- \ndef pentagramm(size):\n t.lt(72)\n t.begin_fill()\n for i in range(5):\n t.fd(size)\n t.penup()\n #t.fd(size)\n t.pendown()\n t.fd(size)\n t.rt(180-36)\n t.end_fill()\n t.begin_fill()\n t.end_fill()\n#----------------------------------------------------------------------------------\ndef zauberstab(size):\n t.begin_fill()\n t.lt(30)\n pos = rechteck(20,size)\n t.penup()\n t.goto(pos)\n t.fd(10)\n t.rt(180-54)\n t.fd(size/4)\n t.lt(180)\n t.pendown()\n pentagramm(size/4)\n t.end_fill()\n\n#----------------------------------------------------------------------------------\n\ndef haus(size):\n quad(size)\n t.bk(size)\n t.lt(90)\n l=math.sqrt(size**2/2)\n dreieck(size,l,45)\n\n#----------------------------------------------------------------------------------\n\n\ndef ui_system():\n\tf = input(\"Möchtest du die Farben in Hexadezimal angeben?: \")\n\tif f == \"Ja\" or f == \"ja\":\n\t\tprint(\"\")\n\t\tf = input(\"Möchtest du die Farben in Hexadezimal angeben?: \")\n\t\tif f == \"Ja\" or f == \"ja\":\n\t\t\tprint(\"\")\n\t\t\tt.bgcolor(input(\"Wähle eine Stiftfarbe: \"))\n\t\t\tprint(\"\")\n\t\t\tt.color(input(\"Wähle eine Hintergrundfarbe: \"))\n\t\t\tprint(\"\")\n\t\t\tt.fillcolor(input(\"Wähle eine Füllfarbe: \"))\n\t\t\tprint(\"\")\n\t\telse:\n\t\t\ts = input (\"Wähle eine Stiftfarbe: \")\n\t\t\tprint(\"\")\n\t\t\th = input (\"Wähle eine Hintergrundfarbe: \")\n\t\t\tprint(\"\")\n\t\t\tf = input (\"Wähle eine Füllfarbe: \")\n\t\t\tprint(\"\")\n\t\t\tfarbe(colors.get(s), colors.get(h), colors.get(f))\n\n\tw_i = input(\"Möchtest du die Größe des Stiftes verändern?: \")\n\n\tif w_i == \"Ja\" or w_i == \"ja\":\n\t\twidth_pen = input(\"Wie groß soll der Stift sein?: \")\n\t\ttry:\n\t\t\twidth_pen = int(width_pen)\n\t\t\tt.width(width_pen)\n\t\texcept:\n\t\t\tprint(\"Error! 
Bitte gebe als Größe nur eine Zahl ein\")\n\t\t\tui_system()\n\n\tprint(\"Es gibt:\\nKreis\\nQuadrat\\ngleichseitiges Dreieck\\nDreieck\\nregelmäßiges n-Eck\\nStern\\nPentagramm\\nHaus\\nRechteck\\nZauberstab\")\n\tprint(\".\\n.\\n.\")\n\tx = input(\"Wähle eine Form: \")\n\tif x == \"Kreis\":\n\t\tr = float(input (\"Wähle einen Radius: \"))\n\t\ta = float(input (\"Wähle einen Abschnitt: \"))\n\t\tkreis (r, a)\n\telif x == \"Quadrat\":\n\t\tl = float(input (\"Wähle eine Seitenlänge: \"))\n\t\tquad(l)\n\telif x == \"Rechteck\":\n\t\tl = float(input(\"Wähle eine Seitenlänge: \"))\n\t\tw = float(input(\"Wähle eine Seitenbreite: \"))\n\t\trechteck(l, w)\n\telif x == \"Stern\":\n\t\tl = float(input (\"Wähle eine Seitenlänge: \"))\n\t\tstern(l)\n\telif x == \"Pentagramm\":\n\t\tl = float(input (\"Wähle eine Seitenlänge: \"))\n\t\tpentagramm(l)\n\telif x == \"Haus\":\n\t\tl=float(input(\"Wähle eine Seitenlänge: \"))\n\t\thaus(l)\n\telif x == \"Zauberstab\":\n\t\tl = float(input (\"Wähle eine Stablänge: \"))\n\t\tzauberstab(l)\n\telif x == \"gleichseitiges Dreieck\":\n\t\tl = float(input (\"Wähle eine Seitenlänge: \"))\n\t\tgleichseitigesdreieck (l)\n\telif x == \"Dreieck\":\n\t\tl1 = float(input(\"Wähle eine Seitenlänge für die erste Seite: \"))\n\t\tl2 = float(input(\"Wähle eine Seitenlänge für die zweite Seite: \"))\n\t\tw1 = float(input(\"Wähle einen ersten Winkel für das Dreieck: \"))\n\n\n\t\tif (w1 >= 180):\n\t\t\tprint(\"Error!\\n Das Dreieck konnte wegen zu großen Winkeln nicht erstellt werden.\")\n\t\t\tui_system()\n\t\telse:\n\t\t\tdreieck(l1, l2, w1)\n\t\t\n\telse:\n\t\tprint(\"Error! Bitte schreibe nur eine der möglichen Antworten\")\n\t\tui_system()","sub_path":"Code/Emmett.py","file_name":"Emmett.py","file_ext":"py","file_size_in_byte":5363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"262895124","text":"\n\nfrom xai.brain.wordbase.nouns._administrator import _ADMINISTRATOR\n\n#class header\nclass _ADMINISTRATORS(_ADMINISTRATOR, ):\n\tdef __init__(self,): \n\t\t_ADMINISTRATOR.__init__(self)\n\t\tself.name = \"ADMINISTRATORS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"administrator\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_administrators.py","file_name":"_administrators.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"228919468","text":"#-------------------------------------------------------------------------------\n# Name: Exercise 1: Safe Open\n# Purpose:\n#\n# Author: yizhou\n#\n# Created: 06/02/2016\n# Copyright: (c) yizhou 2016\n# Licence: \n#-------------------------------------------------------------------------------\n\ndef safe_open(fileName, mode):\n    try:\n        if mode == 'read':\n            open(fileName,'r')\n        elif mode == 'write':\n            open(fileName,'w')\n        elif mode == 'both':\n            open(fileName)\n    except IOError:\n        print('I/O error: No such file or directory')\n        return None\n    else: return True\n\ndef main():\n    fileName = input('Enter the file name to read:')\n    mode = input('For reading or writing or both?\\nEnter \\'read\\',\\'write\\', or \\'both\\'')\n    while mode not in ('read', 'write', 'both'):\n        print('Invalid input, please check your spelling.')\n        mode = input('For reading or writing or both?\\nEnter \\'read\\',\\'write\\', or \\'both\\'')\n    f = safe_open(fileName, mode)\n    print('f == None:', f==None)\n\nif __name__ == '__main__':\n    
main()\n","sub_path":"Lab5/Exercise1.py","file_name":"Exercise1.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"619552620","text":"'''\r\nCreated on Apr 16, 2016\r\n\r\n@author: Hari\r\n'''\r\n\r\ndef createMaxStr(inputStr):\r\n newStr = inputStr[0];\r\n remStr = inputStr[1:]\r\n for ch in remStr:\r\n if ch >= newStr[0]:\r\n newStr = ch + newStr;\r\n else:\r\n newStr = newStr + ch;\r\n return newStr;\r\n\r\nt = int(input());\r\nfor i in range(1, t + 1):\r\n origStr = input();\r\n print(\"Case #{}: {}\".format(i, createMaxStr(origStr))); \r\n \r\n\r\n \r\n ","sub_path":"codes/CodeJamCrawler/CJ_16_1/16_1_1_neo_hmm_lastword.py","file_name":"16_1_1_neo_hmm_lastword.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"62563230","text":"# -*- coding: utf-8 -*-\nimport json\nfrom netaporter_elasticsearch.items import NetaporterElasticsearchItem\nfrom scrapy import Spider\nfrom scrapy.http import Request\n\nclass NetaporterSpider(Spider):\n name = 'netaporter'\n allowed_domains = ['net-a-porter.com']\n start_urls = ['https://www.net-a-porter.com/kr/en/d/Shop/Whats-New/Now']\n\n def parse(self, response):\n # print 'response.encoding value: ' + response.encoding\n clothing_url = response.xpath('//ul[@id=\"subnav\"]/li[2]/a/@href').extract_first()\n absolute_clothing_url = response.urljoin(clothing_url)\n yield Request(absolute_clothing_url,\n callback=self.parse_category,\n meta={'category': \"clothing\"})\n\n shoes_url = response.xpath('//ul[@id=\"subnav\"]/li[3]/a/@href').extract_first()\n absolute_shoes_url = response.urljoin(shoes_url)\n yield Request(absolute_shoes_url,\n callback=self.parse_category,\n meta={'category': \"shoes\"})\n\n bags_url = response.xpath('//ul[@id=\"subnav\"]/li[4]/a/@href').extract_first()\n absolute_bags_url = response.urljoin(bags_url)\n yield Request(absolute_bags_url,\n callback=self.parse_category,\n meta={'category': \"bags\"})\n\n accessories_url = response.xpath('//ul[@id=\"subnav\"]/li[5]/a/@href').extract_first()\n absolute_accessories_url = response.urljoin(accessories_url)\n yield Request(absolute_accessories_url,\n callback=self.parse_category,\n meta={'category': \"accessories\"})\n\n def parse_category(self, response):\n item_urls = response.xpath('//*[@id=\"product-list\"]//div[@class=\"product-image\"]/a/@href').extract()\n for item_url in item_urls:\n absolute_url = response.urljoin(item_url)\n yield Request(absolute_url, callback=self.parse_item, meta={\n 'category': response.meta['category'],\n 'relative_url': item_url\n })\n\n relative_url = response.xpath('//*[@id=\"product-list-menu\"]//a[@class=\"previous-page\"]/@href').extract_first().split('?')[0]\n current_page_num = response.xpath('//div[@id=\"product-list-menu\"]//span[@class=\"pagination-page-current\"]/text()').extract_first()\n next_page_num = int(current_page_num) + 1\n # absolute_next_url = response.urljoin('{}/all?pn={}'.format(relative_url, next_page_num))\n absolute_next_url = response.urljoin(response.xpath('//*[@class=\"next-page\"]/@href').extract_first())\n print('absolute_next_url')\n print(absolute_next_url)\n\n yield Request(absolute_next_url, callback=self.parse_category)\n\n def parse_item(self, response):\n price_json = response.xpath('//nap-price[@class=\"product-price\"]/@price').extract_first()\n price_json = json.loads(price_json)\n item = NetaporterElasticsearchItem()\n 
item['id'] = response.meta['relative_url'].replace('/', '')[:17]\n item['url'] = response.url\n item['brand'] = response.xpath('//a[@class=\"designer-name\"]/span/text()').extract_first()\n item['item_name'] = response.xpath('//h2[@class=\"product-name\"]/text()').extract_first()\n item['product_code'] = response.xpath('//div[@class=\"top-product-code\"]//span/text()').extract_first()\n item['category'] = response.meta['category']\n item['price'] = price_json['amount']\n\n item['image_array'] = []\n\n image1 = response.xpath('//*[@id=\"main-image-carousel\"]/ul/li[1]/img/@src').extract_first()\n image2 = response.xpath('//*[@id=\"main-image-carousel\"]/ul/li[2]/img/@src').extract_first()\n image3 = response.xpath('//*[@id=\"main-image-carousel\"]/ul/li[3]/img/@src').extract_first()\n image4 = response.xpath('//*[@id=\"main-image-carousel\"]/ul/li[4]/img/@src').extract_first()\n image5 = response.xpath('//*[@id=\"main-image-carousel\"]/ul/li[5]/img/@src').extract_first()\n\n item['image_array'].append('http:' + image2.replace('_xs', '_pp'))\n item['image_array'].append('http:' + image1.replace('_xs', '_pp'))\n item['image_array'].append('http:' + image3.replace('_xs', '_pp'))\n item['image_array'].append('http:' + image4.replace('_xs', '_pp'))\n item['image_array'].append('http:' + image5.replace('_xs', '_pp'))\n\n yield item\n","sub_path":"netaporter_elasticsearch/spiders/netaporter.py","file_name":"netaporter.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"343945895","text":"import unittest\nfrom typing import List, Dict\n\nimport tokenizers\nfrom transformers import AutoTokenizer\n\nimport data_operations\nfrom crypto_news import CryptoNews\nfrom crypto_news_dataset import CryptoNewsDataset\n\n\nclass TestDataOperations(unittest.TestCase):\n def setUp(self) -> None:\n self.tr_data_path = \"../data/crypto_news_parsed_2013-2017_train.csv\"\n self.tst_data_path = \"../data/crypto_news_parsed_2018_validation.csv\"\n\n def test_read_tr_data(self):\n tr_data = data_operations.read_data(self.tr_data_path, num_rows=10)\n self.assertIsInstance(tr_data[0], CryptoNews)\n\n def test_prepare_data(self):\n tst_data = data_operations.read_data(self.tst_data_path, num_rows=10)\n tokenizer = AutoTokenizer.from_pretrained(\"t5-base\")\n max_len = 10\n prepared_tst_data = data_operations.prepare_data(tst_data, tokenizer, max_len)\n self.assertIsInstance(prepared_tst_data, CryptoNewsDataset)\n\n def test_text_preprocessing(self):\n txt = \" Bitcoin Price Update: Will China Lead us Down?\\r\\n \"\n expected = \"bitcoin price update: will china lead us down?\"\n self.assertEqual(expected, data_operations.text_preprocessing(txt))\n","sub_path":"test/test_data_operations.py","file_name":"test_data_operations.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"394112096","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /app/sms/migrations/0001_initial.py\n# Compiled at: 2019-09-24 05:06:42\n# Size of source mod 2**32: 1727 bytes\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [\n migrations.CreateModel(name='Template',\n fields=[\n (\n 'id', models.AutoField(auto_created=True, 
primary_key=True, serialize=False, verbose_name='ID')),\n                (\n                 'label', models.CharField(max_length=200, verbose_name='label')),\n                (\n                 'code', models.CharField(max_length=50, unique=True, verbose_name='code')),\n                (\n                 'content', models.TextField(blank=True, verbose_name='content'))],\n            options={'verbose_name':'sms template', \n             'verbose_name_plural':'sms template'}),\n        migrations.CreateModel(name='Log',\n            fields=[\n                (\n                 'id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n                (\n                 'mobile', models.CharField(max_length=15, verbose_name='mobile')),\n                (\n                 'content', models.TextField(verbose_name='content')),\n                (\n                 'is_success', models.BooleanField(default=True, verbose_name='is success')),\n                (\n                 'created', models.DateTimeField(auto_now_add=True, verbose_name='created')),\n                (\n                 'template', models.ForeignKey(on_delete=(django.db.models.deletion.CASCADE), related_name='记录', to='sms.Template'))],\n            options={'verbose_name':'sms log', \n             'verbose_name_plural':'sms log', \n             'ordering':[\n              '-created']})]","sub_path":"pycfiles/django-sms-cn-0.1.1.tar/0001_initial.cpython-37.py","file_name":"0001_initial.cpython-37.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"112426492","text":"import pytest\nimport sys\nfrom math import isclose\nfrom mock import patch, call\nfrom pathlib import Path\nfrom textwrap import dedent\n\nfrom phykit.phykit import Phykit\n\nhere = Path(__file__)\n\n\n@pytest.mark.integration\nclass TestTree(object):\n    @patch(\"builtins.print\")\n    def test_treeness_over_rcv(self, mocked_print):\n        expected_result = \"0.35\\t0.126\\t0.36\"\n        testargs = [\n            \"phykit\",\n            \"treeness_over_rcv\",\n            \"-t\",\n            f\"{here.parent.parent}/sample_files/tree_simple.tre\",\n            \"-a\",\n            f\"{here.parent.parent}/sample_files/simple.fa\",\n        ]\n        with patch.object(sys, \"argv\", testargs):\n            Phykit()\n        assert mocked_print.mock_calls == [call(expected_result)]","sub_path":"tests/integration/test_tree.py","file_name":"test_tree.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"296251989","text":"#!/usr/bin/python\n\n\"\"\"\nApply FreeSurfer's \"Talairach\" transform matrix to take an unconformed volume to a standard space.\n\nCommand: python transform_unconformed_volume_to_standard_space.py\n         <native_volume> <subject_path> <output_path> <output_name> <interpolation>\n\nExample: python transform_unconformed_volume_to_standard_space.py\n         subject1.nii.gz /Applications/freesurfer/subjects/bert output/ bert nearest\n\nSee http://surfer.nmr.mgh.harvard.edu/fswiki/mri_convert\n\n2011 Arno Klein (arno@mindboggle.info)\nApache License, Version 2.0\n\n# Example: Run transform_unconformed_volume_to_standard_space on a directory:\nimport os\noutput_path = 'transformed_brainvisa_fundus_volumes_Perrot62'\nsubjects_path = '/home/arno/Data/Brains/Perrot62_sulci/freesurfer5.1_output_plus_surface_features/'\nvolumes_path = '/home/arno/Data/Brains/Perrot62_sulci/manually_labeled_brainvisa_fundi/sulci_volumes/'\nsubjects = os.listdir(subjects_path)\nvolumes = os.listdir(volumes_path)\nfor i,volume in enumerate(volumes):\n    args = ['python transform_unconformed_volume_to_standard_space.py',\n            volumes_path+volume, subjects_path+subjects[i], output_path, volume, 'nearest']\n    print(\" \".join(args)); os.system(\" \".join(args)); # p = Popen(args); p.close()\n\n\"\"\"\n\nimport os, sys\n\n# Check inputs\nif len(sys.argv) < 6:\n    print(\"Usage: python 
transform_unconformed_volume_to_standard_space.py <native_volume> <subject_path> <output_path> <output_name> <interpolation>\")\n    exit(-1)\nelse:\n    native_volume_nii = sys.argv[1]\n    subject_path = sys.argv[2] + '/'\n    output_path = sys.argv[3] + '/'\n    output_name = sys.argv[4]\n    interpolation = sys.argv[5]  # Ex: nearest, interpolate, sinc, cubic\n\n# Apply FreeSurfer's \"Talairach\" transform matrix to take a conformed volume to standard space:\nif os.path.exists(native_volume_nii):\n    pass\nelse:\n    print(native_volume_nii + \" doesn't exist.\")\n    exit(-1)\n\nconformed_volume_mgz = output_path + output_name + '.conformed.mgz'\ntransformed_volume_nii = output_path + output_name + '.transformed.nii.gz'\nxfm = subject_path + 'mri/transforms/talairach.xfm'\n\nargs = ['mri_convert --conform -rt', interpolation, native_volume_nii, conformed_volume_mgz]\nprint(\" \".join(args)); os.system(\" \".join(args)); # p = Popen(args); p.close()\nargs = ['mri_convert --apply_transform', xfm, '-rt',interpolation, conformed_volume_mgz, transformed_volume_nii]\nprint(\" \".join(args)); os.system(\" \".join(args)); # p = Popen(args); p.close()\n","sub_path":"transform_unconformed_volume_to_standard_space.py","file_name":"transform_unconformed_volume_to_standard_space.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"487774187","text":"import pandas as pd\r\nimport numpy as np\r\n\r\nsyn_table = {}\r\nlist_pairs_ppi = []\r\n\r\n'''\r\ntoy = pd.read_csv(\"ToyPPIData.txt\", \"\\t\")\r\nppi_1 = np.array(toy.values[:, 0:1])\r\nppi_2 = np.array(toy.values[:, 1:2])\r\nfor index in range(ppi_1.shape[0]):\r\n    if ppi_1[index, 0] not in list_ppi:\r\n        list_ppi.append(ppi_1[index, 0])\r\nfor index in range(ppi_2.shape[0]):\r\n    if ppi_2[index, 0] not in list_ppi:\r\n        list_ppi.append(ppi_2[index, 0])\r\n'''\r\n\r\ndf = pd.read_csv(\"BIOGRID-ALL-3.4.152.tab2.txt\", \"\\t\")\r\nppi = np.array(df.values[:, 7:9])\r\nname = np.array(df.values[:, 15:17])\r\n\r\n\r\nwith open(\"ppi_biogrid.txt\", \"w\") as f:\r\n    for index in range(ppi.shape[0]):\r\n        if name[index, 0] == 9606 and name[index, 1] == 9606:\r\n            print(\"9606!!!\")\r\n            if [ppi[index, 0], ppi[index, 1]] not in list_pairs_ppi and [ppi[index, 1], ppi[index, 0]] not in list_pairs_ppi:\r\n                f.write(ppi[index, 0] + \"\\t\" + ppi[index, 1] + \"\\t1\\n\")\r\n                list_pairs_ppi.append([ppi[index, 0], ppi[index, 1]])\r\n                print(\"Added \" + ppi[index, 0] + \"\\t\" + ppi[index, 1] + \"\\t1\")\r\n            else:\r\n                print(\"Duplicate!\")\r\nf.close()\r\n\r\n\r\n'''\r\nppi_A = np.array(df.values[:, 7:8])\r\nsyn_A = np.array(df.values[:, 9:10])\r\nppi_B = np.array(df.values[:, 8:9])\r\nsyn_B = np.array(df.values[:, 10:11])\r\nfor index in range(ppi_A.shape[0]):\r\n    if ppi_A[index, 0] not in syn_table.keys():\r\n        syn_table[ppi_A[index, 0]] = syn_A[index, 0].split(\"|\")\r\n        print(\"Split!\")\r\n    if ppi_B[index, 0] not in syn_table.keys():\r\n        syn_table[ppi_B[index, 0]] = syn_B[index, 0].split(\"|\")\r\n        print(\"Split!\") \r\n'''\r\n\r\n'''\r\nwith open(\"ToyPPIData.txt\", \"w\") as f:\r\n    for index in range(ppi_A.shape[0]):\r\n        write_A = ppi_A[index, 0]\r\n        for syn in syn_table[ppi_A[index, 0]]:\r\n            if syn in list_ppi:\r\n                write_A = syn\r\n                print(\"Synonym Found!_A\")\r\n        write_B = ppi_B[index, 0]\r\n        for syn in syn_table[ppi_B[index, 0]]:\r\n            if syn in list_ppi:\r\n                write_B = syn\r\n                print(\"Synonym Found!_B\")\r\n        f.write(write_A + \"\\t\" + write_B + 
\"\\t1\\n\")\r\nf.close()\r\n'''\r\n\r\n\r\n","sub_path":"alchemist.py","file_name":"alchemist.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"97993546","text":"from Cook.logger_script import logger\n\nkeyholder = ['Ingredient_name', 'quantity', 'measure']\n\n@logger('Log/')\ndef shop_list(dishes_list, person_amount, recipe_book):\n to_buy_list = {}\n for dish in dishes_list:\n dish = dish.capitalize()\n list_to_decompose = recipe_book.get(dish, f'Ошибка: такого блюда нет.\\n')\n if 'Ошибка' in list_to_decompose:\n user_answer = input(' Продолжить y/n?\\n')\n if user_answer.lower() == 'y':\n continue\n elif user_answer.lower() == 'n':\n print('Ошибка: некорректно введенное название блюда')\n return None\n else:\n print('Неопознанная команда')\n return None\n else:\n for ingredient in list_to_decompose:\n if to_buy_list.get(ingredient['Ingredient_name'], 'Error') == 'Error':\n temporary_dictionary = {'measure': ingredient['measure'],\n 'quantity': int(ingredient['quantity']) * person_amount}\n to_buy_list[ingredient['Ingredient_name']] = temporary_dictionary\n else:\n to_buy_list[ingredient['Ingredient_name']]['quantity'] = to_buy_list[ingredient['Ingredient_name']]['quantity'] + int(ingredient['quantity']) * person_amount\n for key, value in to_buy_list.items():\n print(f'{key}: {value}')\n return to_buy_list\n\n\ndef cook_book_writing(recipe_file):\n cook_book = {}\n with open(recipe_file, encoding='utf-8') as recipe_book:\n for line in recipe_book:\n line = line.lstrip('\\ufeff')\n temporary_list = []\n for _ in range(int(recipe_book.readline())):\n temporary_dict = {}\n current_ingredient = recipe_book.readline()\n recipe_decompose = current_ingredient.split(' | ')\n recipe_decompose[2] = recipe_decompose[2].rstrip()\n for cycle in range(3):\n temporary_dict[keyholder[cycle]] = recipe_decompose[cycle]\n temporary_list.append(temporary_dict)\n cook_book[line.strip()] = temporary_list\n recipe_book.readline()\n return cook_book\n\n#ниже вывод печати для задачи №1\nprint(cook_book_writing('recipes.txt'))\n\n#Ниже реализована функция с пользовательским вводом.\nuser_input = input('Пожалуйста, введите названия блюд через запятую и пробел\\n')\nbuy_list = user_input.split(', ')\ntry:\n user_amount = int(input('Пожалуйста, введите количество персон:\\n'))\n shop_list(buy_list, user_amount, cook_book_writing('recipes.txt'))\nexcept ValueError:\n print('Введено не число')\n\n#Ниже убрана в комментарии реализация функции с вводом списка блюд и количества персон внутри кода:\n# user_list = []\n# user_amount = 1\n# shop_list(buy_list, user_amount)","sub_path":"cookbook.py","file_name":"cookbook.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"122727178","text":"#50.\tEscribe un algoritmo o el respectivo diagrama de flujo para leer una cantidad variable de números e indicar el promedio de los números pares y el promedio de los números impares.\r\nn = int(input(\"Ingrese la cantidad de números que quiera ingresar: \"))\r\npares = 0\r\nsumpares = 0\r\nimpares = 0\r\nsumimpares = 0\r\nfor i in range(n):\r\n numero = int(input(\"Ingrese un número: \"))\r\n if numero % 2 == 0:\r\n pares = pares+1\r\n sumpares = sumpares+numero\r\n else:\r\n impares = impares+1\r\n sumimpares = sumimpares+numero\r\n\r\nprint(\"La el promedio de los números pares = \", sumpares/pares)\r\nprint(\"La el promedio de los 
números impares = \", sumimpares/impares)","sub_path":"Punto 50.py","file_name":"Punto 50.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"147240443","text":"import tensorflow as tf\r\nimport numpy as np\r\n\r\nfrom util import *\r\nfrom tensorflow.contrib.rnn import GRUCell\r\nfrom tensorflow.python.ops.rnn import bidirectional_dynamic_rnn as bi_rnn\r\nclass cfr_net(object):\r\n \"\"\"\r\n cfr_net implements the counterfactual regression neural network\r\n by F. Johansson, U. Shalit and D. Sontag: https://arxiv.org/abs/1606.03976\r\n\r\n This file contains the class cfr_net as well as helper functions.\r\n The network is implemented as a tensorflow graph. The class constructor\r\n creates an object containing relevant TF nodes as member variables.\r\n \"\"\"\r\n\r\n def __init__(self, x, t, y_ , p_t, FLAGS, r_alpha, r_lambda, do_in, do_out, dims, test):\r\n self.variables = {}\r\n self.wd_loss = 0\r\n self.attention_size=50\r\n self.seq_len_ph=50\r\n self.test=test\r\n\r\n if FLAGS.nonlin.lower() == 'elu':\r\n self.nonlin = tf.nn.elu\r\n else:\r\n self.nonlin = tf.nn.relu\r\n\r\n self._build_graph(x, t, y_ , p_t, FLAGS, r_alpha, r_lambda, do_in, do_out, dims)\r\n\r\n def _add_variable(self, var, name):\r\n ''' Adds variables to the internal track-keeper '''\r\n basename = name\r\n i = 0\r\n while name in self.variables:\r\n name = '%s_%d' % (basename, i) #@TODO: not consistent with TF internally if changed\r\n i += 1\r\n\r\n self.variables[name] = var\r\n\r\n def _create_variable(self, var, name):\r\n ''' Create and adds variables to the internal track-keeper '''\r\n\r\n var = tf.Variable(var, name=name)\r\n self._add_variable(var, name)\r\n return var\r\n\r\n def _create_variable_with_weight_decay(self, initializer, name, wd):\r\n ''' Create and adds variables to the internal track-keeper\r\n and adds it to the list of weight decayed variables '''\r\n var = self._create_variable(initializer, name)\r\n self.wd_loss += wd*tf.nn.l2_loss(var)\r\n return var\r\n\r\n def _build_graph(self, x, t, y_ , p_t, FLAGS, r_alpha, r_lambda, do_in, do_out, dims):\r\n \"\"\"\r\n Constructs a TensorFlow subgraph for counterfactual regression.\r\n Sets the following member variables (to TF nodes):\r\n\r\n self.output The output prediction \"y\"\r\n self.tot_loss The total objective to minimize\r\n self.imb_loss The imbalance term of the objective\r\n self.pred_loss The prediction term of the objective\r\n self.weights_in The input/representation layer weights\r\n self.weights_out The output/post-representation layer weights\r\n self.weights_pred The (linear) prediction layer weights\r\n self.h_rep The layer of the penalized representation\r\n \"\"\"\r\n\r\n self.x = x\r\n self.t = t\r\n self.y_ = y_\r\n self.p_t = p_t\r\n self.r_alpha = r_alpha\r\n self.r_lambda = r_lambda\r\n self.do_in = do_in\r\n self.do_out = do_out\r\n\r\n dim_input = dims[0]\r\n dim_in = dims[1]\r\n dim_out = dims[2]\r\n\r\n\r\n if FLAGS.n_in == 0 or (FLAGS.n_in == 1 and FLAGS.varsel):\r\n dim_in = dim_input\r\n if FLAGS.n_out == 0:\r\n if FLAGS.split_output == False:\r\n dim_out = dim_in+1\r\n else:\r\n dim_out = dim_in\r\n\r\n regularizer=tf.contrib.layers.l2_regularizer(scale=1.0)\r\n ''' Construct input/representation layers '''\r\n self.phi=tf.layers.dense(x,dim_in,activation=tf.nn.relu,kernel_regularizer=regularizer)\r\n ''' Regularization '''\r\n self.wd_loss+=tf.losses.get_regularization_loss()\r\n\r\n rnn_outputs, _ = 
bi_rnn(GRUCell(dim_in), GRUCell(dim_in),\r\n                                inputs=self.phi, dtype=tf.float32)\r\n\r\n        h_rep = tf.concat([rnn_outputs[0],rnn_outputs[1]],2) #`[batch_size, max_time, cell.output_size]`\r\n\r\n        if FLAGS.normalization == 'divide':\r\n            h_rep_norm = h_rep / safe_sqrt(tf.reduce_sum(tf.square(h_rep), axis=1, keep_dims=True))\r\n        else:\r\n            h_rep_norm = 1.0*h_rep\r\n\r\n        ''' Construct output layers '''\r\n        y = self._build_output_graph(rnn_outputs, t, dim_in, dim_out, do_out, FLAGS) # y contains all the predictions in the sequence\r\n\r\n        ''' Compute sample reweighting '''\r\n        if FLAGS.reweight_sample:\r\n            w_t = t/(2*p_t)\r\n            w_c = (1-t)/(2*(1-p_t))\r\n            sample_weight = w_t + w_c\r\n        else:\r\n            sample_weight = 1.0\r\n\r\n        self.sample_weight = sample_weight\r\n\r\n        ''' Construct factual loss function '''\r\n        y_seq=y[:,0:self.seq_len_ph]\r\n        y_T=y[:,self.seq_len_ph]\r\n        y_seq_ = y_[:, 0:self.seq_len_ph]\r\n        y_T_ = y_[:, self.seq_len_ph]\r\n        if FLAGS.loss == 'l1':\r\n            risk1 = tf.reduce_mean(sample_weight*tf.abs(y_seq_-y_seq))\r\n            risk2 = tf.reduce_mean(sample_weight * tf.abs(y_T_ - y_T))\r\n            risk = risk1 + self.test*risk2\r\n            pred_error_1 = tf.reduce_mean(tf.abs(y_seq_ - y_seq))\r\n            pred_error_2 = tf.reduce_mean(tf.abs(y_T_ - y_T))\r\n            pred_error = pred_error_1 + self.test * pred_error_2\r\n        elif FLAGS.loss == 'log':\r\n            y = 0.995/(1.0+tf.exp(-y)) + 0.0025\r\n            res = y_*tf.log(y) + (1.0-y_)*tf.log(1.0-y)\r\n\r\n            res_seq = res[:, 0:self.seq_len_ph]\r\n            res_T = res[:, self.seq_len_ph]\r\n            risk1 = -tf.reduce_mean(sample_weight*res_seq)\r\n            risk2 = -tf.reduce_mean(sample_weight * res_T)\r\n            risk=risk1+self.test *risk2\r\n\r\n            pred_error1 = -tf.reduce_mean(res_seq)\r\n            pred_error2 = -tf.reduce_mean(res_T)\r\n            pred_error= pred_error1+self.test *pred_error2\r\n        else:\r\n            risk1 = tf.reduce_mean(sample_weight*tf.square(y_seq_ - y_seq))\r\n            risk2 = tf.reduce_mean(sample_weight * tf.square(y_T_ - y_T))\r\n            risk = risk1 + self.test * risk2\r\n            pred_error1 = tf.sqrt(tf.reduce_mean(tf.square(y_seq_ - y_seq)))\r\n            pred_error2 = tf.sqrt(tf.reduce_mean(tf.square(y_T_ - y_T)))\r\n            pred_error = pred_error1 + self.test * pred_error2\r\n\r\n        ''' Imbalance error '''\r\n        if FLAGS.use_p_correction:\r\n            p_ipm = self.p_t\r\n        else:\r\n            p_ipm = 0.5\r\n\r\n        if FLAGS.imb_fun == 'mmd2_rbf':\r\n            imb_dist = mmd2_rbf(h_rep_norm,t,p_ipm,FLAGS.rbf_sigma)\r\n            imb_error = r_alpha*imb_dist\r\n        elif FLAGS.imb_fun == 'mmd2_lin':\r\n            imb_dist = mmd2_lin(h_rep_norm,t,p_ipm)\r\n            imb_error = r_alpha*mmd2_lin(h_rep_norm,t,p_ipm)\r\n        elif FLAGS.imb_fun == 'mmd_rbf':\r\n            imb_dist = tf.abs(mmd2_rbf(h_rep_norm,t,p_ipm,FLAGS.rbf_sigma))\r\n            imb_error = safe_sqrt(tf.square(r_alpha)*imb_dist)\r\n        elif FLAGS.imb_fun == 'mmd_lin':\r\n            imb_dist = mmd2_lin(h_rep_norm,t,p_ipm)\r\n            imb_error = safe_sqrt(tf.square(r_alpha)*imb_dist)\r\n        elif FLAGS.imb_fun == 'wass':\r\n            imb_dist = wasserstein(self.seq_len_ph, h_rep_norm,t,p_ipm,lam=FLAGS.wass_lambda,its=FLAGS.wass_iterations,sq=False,backpropT=FLAGS.wass_bpt)\r\n            imb_error = r_alpha * imb_dist\r\n            #self.imb_mat = imb_mat # FOR DEBUG\r\n        elif FLAGS.imb_fun == 'wass2':\r\n            imb_dist = wasserstein(h_rep_norm,t,p_ipm,lam=FLAGS.wass_lambda,its=FLAGS.wass_iterations,sq=True,backpropT=FLAGS.wass_bpt)\r\n            imb_error = r_alpha * imb_dist\r\n            #self.imb_mat = imb_mat # FOR DEBUG\r\n        else:\r\n            imb_dist = lindisc(h_rep_norm,p_ipm,t)\r\n            imb_error = r_alpha * imb_dist\r\n\r\n        ''' Total error '''\r\n        tot_error = risk\r\n\r\n        if FLAGS.p_alpha>0:\r\n            tot_error = tot_error + imb_error\r\n\r\n        if FLAGS.p_lambda>0:\r\n            tot_error = 
tot_error + r_lambda*self.wd_loss\r\n\r\n\r\n self.output = y\r\n self.tot_loss = tot_error\r\n self.imb_loss = imb_error\r\n self.imb_dist = imb_dist\r\n self.pred_loss = pred_error\r\n self.h_rep = h_rep\r\n self.h_rep_norm = h_rep_norm\r\n\r\n def attention(self, inputs, attention_size, time_major=False, return_alphas=False):\r\n \"\"\"\r\n Attention mechanism layer which reduces RNN outputs with Attention vector.\r\n\r\n Args:\r\n inputs: The Attention inputs.\r\n Matches outputs of RNN layer (not final state):\r\n In case of RNN, this must be RNN outputs `Tensor`:\r\n If time_major == False (default), this must be a tensor of shape:\r\n `[batch_size, max_time, cell.output_size]`.\r\n If time_major == True, this must be a tensor of shape:\r\n `[max_time, batch_size, cell.output_size]`.\r\n attention_size: Linear size of the Attention weights.\r\n time_major: The shape format of the `inputs` Tensors.\r\n If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.\r\n If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.\r\n Using `time_major = True` is a bit more efficient because it avoids\r\n transposes at the beginning and end of the RNN calculation. However,\r\n most TensorFlow data is batch-major, so by default this function\r\n accepts input and emits output in batch-major form.\r\n return_alphas: Whether to return attention coefficients variable along with layer's output.\r\n Used for visualization purpose.\r\n Returns:\r\n The Attention output `Tensor`.\r\n In case of RNN, this will be a `Tensor` shaped:\r\n `[batch_size, cell.output_size]`.\r\n In case of Bidirectional RNN, this will be a `Tensor` shaped:\r\n `[batch_size, cell_fw.output_size + cell_bw.output_size]`.\r\n \"\"\"\r\n\r\n if time_major:\r\n # (T,B,D) => (B,T,D)\r\n inputs = tf.array_ops.transpose(inputs, [1, 0, 2])\r\n\r\n hidden_size = inputs.shape[2].value # D value - hidden size of the RNN layer\r\n\r\n # Trainable parameters\r\n w_omega=self._create_variable(tf.random_normal([hidden_size, attention_size],\r\n stddev=FLAGS.weight_init / np.sqrt(hidden_size)), 'w_omega')\r\n b_omega=self._create_variable(tf.random_normal([attention_size],\r\n stddev=FLAGS.weight_init / np.sqrt(attention_size)), 'b_omega')\r\n u_omega = self._create_variable(tf.random_normal([attention_size],\r\n stddev=FLAGS.weight_init / np.sqrt(attention_size)), 'u_omega')\r\n\r\n with tf.name_scope('v'):\r\n # Applying fully connected layer with non-linear activation to each of the B*T timestamps;\r\n # the shape of `v` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size\r\n v = tf.tanh(tf.tensordot(inputs, w_omega, axes=1) + b_omega)\r\n\r\n # For each of the timestamps its vector of size A from `v` is reduced with `u` vector\r\n vu = tf.tensordot(v, u_omega, axes=1, name='vu') # (B,T) shape\r\n alphas = tf.nn.softmax(vu, name='alphas') # (B,T) shape\r\n\r\n # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape\r\n output = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)\r\n\r\n if not return_alphas:\r\n return output\r\n else:\r\n return output, alphas\r\n\r\n def _build_output(self, phi, rep_fw,rep_bw, dim_in, dim_out, do_out, FLAGS):\r\n\r\n h_input = tf.concat([rep_fw,rep_bw],2) #`[batch_size, max_time, cell.output_size]`\r\n if FLAGS.normalization == 'divide':\r\n h_input = h_input / safe_sqrt(tf.reduce_sum(tf.square(h_input), axis=1, keep_dims=True))\r\n else:\r\n h_input = 1.0*h_input\r\n\r\n y_seq = tf.layers.dense(h_input,1,activation=tf.nn.relu)\r\n\r\n 
attention_output = self.attention(h_input, self.attention_size)\r\n h_out = [attention_output]\r\n dims = [2 * dim_in] + ([dim_out] * FLAGS.n_out)\r\n\r\n weights_out = []\r\n biases_out = []\r\n\r\n for i in range(0, FLAGS.n_out):\r\n wo = self._create_variable_with_weight_decay(\r\n tf.random_normal([dims[i], dims[i + 1]],\r\n stddev=FLAGS.weight_init / np.sqrt(dims[i])),\r\n 'w_out_%d' % i, 1.0)\r\n weights_out.append(wo)\r\n\r\n biases_out.append(tf.Variable(tf.zeros([1, dim_out])))\r\n z = tf.matmul(h_out[i], weights_out[i]) + biases_out[i]\r\n # No batch norm on output because p_cf != p_f\r\n\r\n h_out.append(self.nonlin(z))\r\n h_out[i + 1] = tf.nn.dropout(h_out[i + 1], do_out)\r\n\r\n weights_pred = self._create_variable(tf.random_normal([dim_out+dim_in, 1],\r\n stddev=FLAGS.weight_init / np.sqrt(dim_out)),\r\n 'w_pred')\r\n bias_pred = self._create_variable(tf.zeros([1]), 'b_pred')\r\n\r\n if FLAGS.varsel or FLAGS.n_out == 0:\r\n self.wd_loss += tf.nn.l2_loss(\r\n tf.slice(weights_pred, [0, 0], [dim_out - 1, 1])) # don't penalize treatment coefficient\r\n else:\r\n self.wd_loss += tf.nn.l2_loss(weights_pred)\r\n\r\n h_pred = h_out[-1]\r\n h_pred_full = tf.concat([phi,h_pred],axis=1)\r\n y = tf.matmul(h_pred_full, weights_pred) + bias_pred\r\n y_seq = tf.squeeze(y_seq, [2])\r\n y_ls = tf.concat([y_seq, y], 1)\r\n return y_ls\r\n\r\n def _build_output_graph(self, rnn_outputs, t, dim_in, dim_out, do_out, FLAGS):\r\n ''' Construct output/regression layers '''\r\n\r\n i0 = tf.to_int32(tf.where(t < 1)[:,0])\r\n i1 = tf.to_int32(tf.where(t > 0)[:,0])\r\n\r\n rep_fw=rnn_outputs[0]\r\n rep_bw=rnn_outputs[1]\r\n\r\n rep_fw0 = tf.gather(rep_fw, i0)\r\n rep_fw1 = tf.gather(rep_fw, i1)\r\n rep_bw0 = tf.gather(rep_bw, i0)\r\n rep_bw1 = tf.gather(rep_bw, i1)\r\n phi_0 = tf.gather(self.phi[:,0,:], i0)\r\n phi_1 = tf.gather(self.phi[:,0,:], i1)\r\n\r\n y0 = self._build_output(phi_0, rep_fw0,rep_bw0, dim_in, dim_out, do_out, FLAGS)\r\n y1 = self._build_output(phi_1, rep_fw1,rep_bw1, dim_in, dim_out, do_out, FLAGS)\r\n\r\n y = tf.dynamic_stitch([i0, i1], [y0, y1])\r\n return y\r\n","sub_path":"cfr/cfr_net_future.py","file_name":"cfr_net_future.py","file_ext":"py","file_size_in_byte":14485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"346443153","text":"###\n# Reads files and converts them to printable vector arrays\n# A vectors is a continuously printed line\n###\n\nfrom coordinates import Coordinate as Co\n\nCo.default_order = 'xy'\n\n\nclass FileReader:\n def __init__(self, maxX: float, maxY: float):\n self.maxX = maxX\n self.maxY = maxY\n\n # Scale an image\n def scale(self, vectorArray: [float]):\n for vector in vectorArray:\n for coordinate in vector:\n coordinate.x *= self.maxX\n coordinate.y *= self.maxY\n return vectorArray\n\n # Converts local coordinates into percentage places in a grid\n def gridify(self, vectorArray: [float], resX: float, resY: float):\n for vector in vectorArray:\n for coordinate in vector:\n coordinate.x = coordinate.x / resX\n coordinate.y = coordinate.y / resY\n return vectorArray\n\n # Parses one vector following the format: x1, y1 - x2, y2\n def parseVector(self, line):\n vector = []\n coordinates = line.strip().split('-')\n for coordinate in coordinates:\n x, y = coordinate.strip().split(',')\n parsedCo = Co(int(x), int(y))\n vector.append(parsedCo)\n return vector\n\n # Parses files to printable lists of vectors\n # Lines that start with '#' are considered comments\n # The first line gives the x and y 
resolution\n    # All following lines give one vector each\n    def parse(self, file):\n        xRes, yRes = 0, 0\n        totalVec = []\n        for line in file:\n            try:\n                line = line.strip()\n                # Neglect comments\n                if not line.startswith(\"#\"):\n                    # Catch the resolutions\n                    if xRes == 0 and yRes == 0:\n                        xRes, yRes = line.strip().split(',')\n                        xRes, yRes = int(xRes.strip()), int(yRes.strip())\n                    # Parse a vector\n                    else:\n                        totalVec.append(self.parseVector(line))\n            except ValueError:\n                print(\"Malformed vector line: \" + line)\n                print(\"We will print the figure without this vector\")\n\n        return self.scale(self.gridify(totalVec, xRes, yRes))\n","sub_path":"fileReader.py","file_name":"fileReader.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"518728601","text":"import sys\nimport 
math\nimport numpy as np\n\n\nmeshName=sys.argv[1]\n\n\ncoordfile = '{}.coord'.format(meshName)\nvelofile = 'VELOC.alya'\npressfile = 'PRESS.alya'\ndensifile = 'DENSI.alya'\n\nfCoord =open(coordfile,'r')\nfVel   =open(velofile,'w')\nfPress =open(pressfile,'w')\nfDensi =open(densifile,'w')\n\npi = 3.14159265\nc = 0.0\n\nprint('---| Start writing initial condition')\nfor line in fCoord:\n    data=line.split()\n\n    pid = int(data[0])\n    dims = len(data)-1\n    x = float(data[1])\n    y = float(data[2])\n\n    if x < c:\n        vx = 0.0\n        vy = 0.0\n        pr = 1.0\n        rho = 1.0\n    elif x>= c:\n        vx = 0.0\n        vy = 0.0\n        pr = 0.1\n        rho = 0.125\n\n    fVel.write('{} {} {}\\n'.format(pid,vx,vy))\n    fPress.write('{} {}\\n'.format(pid,pr))\n    fDensi.write('{} {}\\n'.format(pid,rho))\n    \nfCoord.close()\nfVel.close()\nfPress.close()\nfDensi.close()\n\nprint('---| End writing initial condition')\n","sub_path":"mesh2d/initialCondition.py","file_name":"initialCondition.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"161727480","text":"#!/usr/bin/python3\nimport logging\nimport os\nimport pandas\n\nlogger = logging.getLogger()\nhandler = logging.StreamHandler()\nformatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\nlogger.setLevel(logging.INFO)\n\nSPREADSHEET = os.path.dirname(os.path.realpath(__file__)) + '/../Redes e Beaglebones.xlsx'\n\nSHEET_MBTEMP = 'PVs MBTemp'\nSHEET_4UHV = 'PVs Agilent 4UHV'\nSHEET_MKS = 'PVs MKS937b'\nSHEET_COUNTING_PRU = 'PVs Counting PRU'\n\nclass DbData():\n    def __init__(self, sheet_name, ip='IP', aditional_check = None):\n        '''\n        :param sheet_name: xlsx sheet name.\n        :param ip: column name containing the ip address.\n        :param aditional_check: a function that receives row and sheet_name and returns True or False depending on the check result.\n        '''\n        self.data = {}\n        sheet = pandas.read_excel(SPREADSHEET, sheet_name=sheet_name, dtype=str)\n        sheet = sheet.replace('nan', '')\n        for index, row in sheet.iterrows():\n            if row[ip] == '':\n                logger.error('%s: IP not set for %s.' % (sheet_name, row))\n                continue\n\n            if aditional_check and not aditional_check(row, sheet_name):\n                continue\n\n            if row[ip] in self.data:\n                self.data[row[ip]].append(row)\n            else:\n                self.data[row[ip]] = [row]\n        logger.info('Generated data structure from sheet %s.' 
% sheet_name)\n\ndef mks_check(row, sheet_name):\n    if row['Configuracao'] == '':\n        logger.error('{}: Configuration not set for {}'.format(sheet_name, row))\n        return False\n    return True\n\n'''\n    Data structures will contain a vector of entries related to a single IP\n'''\nDATA_MBTEMP = DbData(SHEET_MBTEMP).data\nDATA_4UHV = DbData(SHEET_4UHV).data\nDATA_MKS = DbData(SHEET_MKS, aditional_check=mks_check).data\nDATA_COUNTING_PRU = DbData(SHEET_COUNTING_PRU).data\n","sub_path":"common/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"342720022","text":"from chess_board import *\r\nfrom FibonacciHeap import *\r\nfrom .heuristic import heuristic\r\n\r\n\r\ndef GetRoute(start, goal, TracebackDict):\r\n    '''\r\n    This is a function for getting the route from\r\n    start point to goal point\r\n    ========================================\r\n    for every step we append the point(step)\r\n    to the end of the route, so we get the\r\n    route from goal point to start point.\r\n    Finally, we return the reverse of the list\r\n    '''\r\n    step = goal\r\n    ans = []\r\n\r\n    while step != start:\r\n        ans.append(step)\r\n        step = TracebackDict[step]\r\n    ans.append(step)\r\n    return ans[::-1]\r\n\r\n\r\nclass SQUARE:\r\n    '''\r\n    record the data of a square (a point)\r\n    (x, y) = position\r\n    g = g()\r\n    h = heuristic()\r\n    '''\r\n\r\n    def __init__(self, x, y, g=float('inf'), h=float('inf')):\r\n        self.x = x\r\n        self.y = y\r\n        self.g = g\r\n        self.h = h\r\n\r\n\r\ndef Astar(start, goal):\r\n    Astar.expanded = 0\r\n\r\n    # a dict to remember the last step\r\n    TraceBackDict = {}\r\n    # explored set\r\n    Explored = set()\r\n\r\n    # initialize heap\r\n    HEAP = FibonacciHeap()\r\n    NODE = {}\r\n    Index = {}\r\n    (x, y) = start\r\n    NODE[start] = SQUARE(x, y, 0, heuristic(start, goal))\r\n    Index[start] = HEAP.Insert(NODE[start].h, NODE[start])\r\n\r\n    while not HEAP.empty():\r\n        # while the heap is not empty, pop the first one\r\n        # and check whether it is the goal point\r\n        Astar.expanded += 1\r\n        MIN = HEAP.ExtractMin()\r\n        pos = (MIN.x, MIN.y)\r\n        Explored.add(pos)\r\n        g = MIN.g\r\n        h = MIN.h\r\n        if pos == goal:\r\n            break\r\n\r\n        NextStepList = next_step_list(pos)\r\n        for next_step in NextStepList:\r\n            if next_step in Explored:\r\n                continue\r\n\r\n            # for the next step which has not been explored,\r\n            # compute its f, g, h, and compare with the original one\r\n            (x, y) = next_step\r\n            new_h = heuristic((x, y), goal)\r\n            new_g = g + 1\r\n            new_f = new_g + new_h\r\n            if next_step in NODE:\r\n                if NODE[next_step].g > new_g:\r\n                    # if a node is already in the heap and gets a smaller g, change its key;\r\n                    # a Fibonacci heap decreases a key in O(1) and increases one in O(log n).\r\n                    # The same point always has the same h(), so new_g < g and new_h == h\r\n                    # imply new_f < f, hence the key change is always an O(1) decrease\r\n                    NODE[next_step].g = new_g\r\n                    NODE[next_step].h = new_h\r\n                    HEAP.ChangeKey(Index[next_step], new_f)\r\n                    TraceBackDict[next_step] = pos\r\n            else:\r\n                # if it's not in the heap, insert it and\r\n                # add some information to TraceBackDict\r\n                NODE[next_step] = SQUARE(x, y, new_g, new_h)\r\n                Index[next_step] = HEAP.Insert(new_f, NODE[next_step])\r\n                TraceBackDict[next_step] = pos\r\n\r\n    # print\r\n    Path = GetRoute(start, goal, TraceBackDict)\r\n    for step in Path:\r\n        STR = '(' + str(step[0]) + ',' + str(step[1]) + ')'\r\n        print(STR, end='')\r\n    print()\r\n    return 
Astar.expanded\r\n","sub_path":"0716072/SearchingStrategies/Astar.py","file_name":"Astar.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"225113848","text":"from typing import Set, cast\n\nimport bpy\nfrom bpy_extras.io_utils import ExportHelper\n\nfrom ..common.preferences import get_preferences, use_legacy_importer_exporter\nfrom ..editor import search, validation\nfrom ..editor.vrm0.panel import (\n draw_vrm0_humanoid_operators_layout,\n draw_vrm0_humanoid_required_bones_layout,\n)\nfrom ..editor.vrm0.property_group import Vrm0HumanoidPropertyGroup\nfrom .abstract_base_vrm_exporter import AbstractBaseVrmExporter\nfrom .gltf2_addon_vrm_exporter import Gltf2AddonVrmExporter\nfrom .legacy_vrm_exporter import LegacyVrmExporter\n\n\ndef export_vrm_update_addon_preferences(\n export_op: bpy.types.Operator, context: bpy.types.Context\n) -> None:\n preferences = get_preferences(context)\n if not preferences:\n return\n if bool(preferences.export_invisibles) != bool(export_op.export_invisibles):\n preferences.export_invisibles = export_op.export_invisibles\n if bool(preferences.export_only_selections) != bool(\n export_op.export_only_selections\n ):\n preferences.export_only_selections = export_op.export_only_selections\n\n\nclass EXPORT_SCENE_OT_vrm(bpy.types.Operator, ExportHelper): # type: ignore[misc] # noqa: N801\n bl_idname = \"export_scene.vrm\"\n bl_label = \"Export VRM\"\n bl_description = \"Export VRM\"\n bl_options = {\"REGISTER\", \"UNDO\"}\n\n filename_ext = \".vrm\"\n filter_glob: bpy.props.StringProperty( # type: ignore[valid-type]\n default=\"*.vrm\", options={\"HIDDEN\"} # noqa: F722,F821\n )\n\n # vrm_version : bpy.props.EnumProperty(name=\"VRM version\" ,items=((\"0.0\",\"0.0\",\"\"),(\"1.0\",\"1.0\",\"\")))\n export_invisibles: bpy.props.BoolProperty( # type: ignore[valid-type]\n name=\"Export Invisible Objects\", # noqa: F722\n update=export_vrm_update_addon_preferences,\n )\n export_only_selections: bpy.props.BoolProperty( # type: ignore[valid-type]\n name=\"Export Only Selections\", # noqa: F722\n update=export_vrm_update_addon_preferences,\n )\n\n errors: bpy.props.CollectionProperty(type=validation.VrmValidationError) # type: ignore[valid-type]\n\n def execute(self, _context: bpy.types.Context) -> Set[str]:\n if not self.filepath:\n return {\"CANCELLED\"}\n filepath: str = self.filepath\n\n if bpy.ops.vrm.model_validate(\n \"INVOKE_DEFAULT\", show_successful_message=False\n ) != {\"FINISHED\"}:\n return {\"CANCELLED\"}\n\n export_objects = search.export_objects(\n bool(self.export_invisibles), bool(self.export_only_selections)\n )\n is_vrm1 = any(\n obj.type == \"ARMATURE\" and obj.data.vrm_addon_extension.is_vrm1()\n for obj in export_objects\n )\n\n if is_vrm1:\n vrm_exporter: AbstractBaseVrmExporter = Gltf2AddonVrmExporter(\n export_objects\n )\n else:\n vrm_exporter = LegacyVrmExporter(export_objects)\n\n vrm_bin = vrm_exporter.export_vrm()\n if vrm_bin is None:\n return {\"CANCELLED\"}\n with open(filepath, \"wb\") as f:\n f.write(vrm_bin)\n return {\"FINISHED\"}\n\n def invoke(self, context: bpy.types.Context, event: bpy.types.Event) -> Set[str]:\n preferences = get_preferences(context)\n if preferences:\n self.export_invisibles = bool(preferences.export_invisibles)\n self.export_only_selections = bool(preferences.export_only_selections)\n if not use_legacy_importer_exporter() and \"gltf\" not in dir(\n bpy.ops.export_scene\n ):\n return cast(\n Set[str],\n 
bpy.ops.wm.vrm_gltf2_addon_disabled_warning(\n \"INVOKE_DEFAULT\",\n ),\n )\n\n export_objects = search.export_objects(\n bool(self.export_invisibles), bool(self.export_only_selections)\n )\n\n armatures = [obj for obj in export_objects if obj.type == \"ARMATURE\"]\n is_vrm0 = any(\n armature.data.vrm_addon_extension.is_vrm0() for armature in armatures\n )\n if len(armatures) == 1 and is_vrm0:\n armature = armatures[0]\n Vrm0HumanoidPropertyGroup.fixup_human_bones(armature)\n Vrm0HumanoidPropertyGroup.check_last_bone_names_and_update(\n armature.data.name,\n defer=False,\n )\n humanoid = armature.data.vrm_addon_extension.vrm0.humanoid\n if all(b.node.value not in b.node_candidates for b in humanoid.human_bones):\n bpy.ops.vrm.assign_vrm0_humanoid_human_bones_automatically(\n armature_name=armature.name\n )\n if not humanoid.all_required_bones_are_assigned():\n bpy.ops.wm.vrm_export_human_bones_assignment(\"INVOKE_DEFAULT\")\n return {\"CANCELLED\"}\n\n if bpy.ops.vrm.model_validate(\n \"INVOKE_DEFAULT\", show_successful_message=False\n ) != {\"FINISHED\"}:\n return {\"CANCELLED\"}\n\n return cast(Set[str], ExportHelper.invoke(self, context, event))\n\n def draw(self, _context: bpy.types.Context) -> None:\n pass # Is needed to get panels available\n\n\nclass VRM_PT_export_error_messages(bpy.types.Panel): # type: ignore[misc] # noqa: N801\n bl_idname = \"VRM_IMPORTER_PT_export_error_messages\"\n bl_space_type = \"FILE_BROWSER\"\n bl_region_type = \"TOOL_PROPS\"\n bl_parent_id = \"FILE_PT_operator\"\n bl_label = \"\"\n bl_options = {\"HIDE_HEADER\"}\n\n @classmethod\n def poll(cls, context: bpy.types.Context) -> bool:\n return (\n str(context.space_data.active_operator.bl_idname) == \"EXPORT_SCENE_OT_vrm\"\n )\n\n def draw(self, context: bpy.types.Context) -> None:\n layout = self.layout\n layout.use_property_split = True\n layout.use_property_decorate = False # No animation.\n\n operator = context.space_data.active_operator\n\n layout.prop(operator, \"export_invisibles\")\n layout.prop(operator, \"export_only_selections\")\n\n validation.WM_OT_vrm_validator.detect_errors(\n context, operator.errors, False, layout\n )\n\n\ndef menu_export(export_op: bpy.types.Operator, _context: bpy.types.Context) -> None:\n export_op.layout.operator(EXPORT_SCENE_OT_vrm.bl_idname, text=\"VRM (.vrm)\")\n\n\nclass WM_OT_export_human_bones_assignment(bpy.types.Operator): # type: ignore[misc] # noqa: N801\n bl_label = \"VRM Required Bones Assignment\"\n bl_idname = \"wm.vrm_export_human_bones_assignment\"\n bl_options = {\"REGISTER\", \"UNDO\"}\n\n def execute(self, context: bpy.types.Context) -> Set[str]:\n preferences = get_preferences(context)\n if preferences:\n export_invisibles = bool(preferences.export_invisibles)\n export_only_selections = bool(preferences.export_only_selections)\n else:\n export_invisibles = False\n export_only_selections = False\n export_objects = search.export_objects(\n export_invisibles, export_only_selections\n )\n armatures = [obj for obj in export_objects if obj.type == \"ARMATURE\"]\n if len(armatures) != 1:\n return {\"CANCELLED\"}\n armature = armatures[0]\n if not armature.data.vrm_addon_extension.is_vrm0():\n return {\"CANCELLED\"}\n Vrm0HumanoidPropertyGroup.fixup_human_bones(armature)\n Vrm0HumanoidPropertyGroup.check_last_bone_names_and_update(\n armature.data.name,\n defer=False,\n )\n humanoid = armature.data.vrm_addon_extension.vrm0.humanoid\n if not humanoid.all_required_bones_are_assigned():\n return {\"CANCELLED\"}\n bpy.ops.export_scene.vrm(\"INVOKE_DEFAULT\")\n 
return {\"FINISHED\"}\n\n def invoke(self, context: bpy.types.Context, _event: bpy.types.Event) -> Set[str]:\n return cast(\n Set[str], context.window_manager.invoke_props_dialog(self, width=550)\n )\n\n def draw(self, context: bpy.types.Context) -> None:\n preferences = get_preferences(context)\n if preferences:\n export_invisibles = bool(preferences.export_invisibles)\n export_only_selections = bool(preferences.export_only_selections)\n else:\n export_invisibles = False\n export_only_selections = False\n\n armatures = [\n obj\n for obj in search.export_objects(export_invisibles, export_only_selections)\n if obj.type == \"ARMATURE\"\n ]\n if not armatures:\n return\n armature = armatures[0]\n\n layout = self.layout\n humanoid = armature.data.vrm_addon_extension.vrm0.humanoid\n if humanoid.all_required_bones_are_assigned():\n alert_row = layout.box()\n alert_row.label(\n text=\"All VRM Required Bones have been assigned.\", icon=\"CHECKMARK\"\n )\n else:\n alert_row = layout.box()\n alert_row.alert = True\n alert_row.label(\n text=\"There are unassigned VRM Required Bones. Please assign all.\",\n icon=\"ERROR\",\n )\n draw_vrm0_humanoid_operators_layout(armature, layout)\n draw_vrm0_humanoid_required_bones_layout(armature, layout)\n","sub_path":"io_scene_vrm/exporter/export_scene.py","file_name":"export_scene.py","file_ext":"py","file_size_in_byte":9249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"114573390","text":"#!/usr/bin/python\n# Classification (U)\n\n\"\"\"Program: db_chg_db.py\n\n Description: Integration testing of DB.chg_db in mongo_class.py.\n\n Usage:\n test/integration/mongo_class/db_chg_db.py\n\n Arguments:\n\n\"\"\"\n\n# Libraries and Global Variables\n\n# Standard\nimport sys\nimport os\n\nif sys.version_info < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\n\n# Third-party\n\n# Local\nsys.path.append(os.getcwd())\nimport mongo_class\nimport lib.gen_libs as gen_libs\nimport version\n\n__version__ = version.__version__\n\n\nclass UnitTest(unittest.TestCase):\n\n \"\"\"Class: UnitTest\n\n Description: Class which is a representation of a unit testing.\n\n Methods:\n setUp\n test_database_passed\n test_no_database\n\n \"\"\"\n\n def setUp(self):\n\n \"\"\"Function: setUp\n\n Description: Initialization for unit testing.\n\n Arguments:\n\n \"\"\"\n\n self.base_dir = \"test/integration\"\n self.config_dir = os.path.join(self.base_dir, \"config\")\n self.config_name = \"mongo\"\n self.cfg = gen_libs.load_module(self.config_name, self.config_dir)\n self.database = \"admin\"\n\n def test_database_passed2(self):\n\n \"\"\"Function: test_database_passed2\n\n Description: Test with database passed.\n\n Arguments:\n\n \"\"\"\n\n mongo = mongo_class.DB(\n self.cfg.name, self.cfg.user, self.cfg.japd, host=self.cfg.host,\n port=self.cfg.port, use_arg=self.cfg.use_arg,\n ssl_client_ca=self.cfg.ssl_client_ca,\n ssl_client_key=self.cfg.ssl_client_key,\n ssl_client_cert=self.cfg.ssl_client_cert,\n ssl_client_phrase=self.cfg.ssl_client_phrase)\n mongo.connect()\n mongo.chg_db(self.database)\n\n self.assertTrue(mongo.db)\n\n def test_database_passed(self):\n\n \"\"\"Function: test_database_passed\n\n Description: Test with database passed.\n\n Arguments:\n\n \"\"\"\n\n mongo = mongo_class.DB(\n self.cfg.name, self.cfg.user, self.cfg.japd, host=self.cfg.host,\n port=self.cfg.port, use_arg=self.cfg.use_arg,\n ssl_client_ca=self.cfg.ssl_client_ca,\n ssl_client_key=self.cfg.ssl_client_key,\n 
ssl_client_cert=self.cfg.ssl_client_cert,\n            ssl_client_phrase=self.cfg.ssl_client_phrase)\n        mongo.connect()\n        mongo.chg_db(dbs=self.database)\n\n        self.assertEqual(mongo.db_name, self.database)\n\n    def test_no_database2(self):\n\n        \"\"\"Function:  test_no_database2\n\n        Description:  Test with no database passed.\n\n        Arguments:\n\n        \"\"\"\n\n        mongo = mongo_class.DB(\n            self.cfg.name, self.cfg.user, self.cfg.japd, host=self.cfg.host,\n            port=self.cfg.port, use_arg=self.cfg.use_arg,\n            ssl_client_ca=self.cfg.ssl_client_ca,\n            ssl_client_key=self.cfg.ssl_client_key,\n            ssl_client_cert=self.cfg.ssl_client_cert,\n            ssl_client_phrase=self.cfg.ssl_client_phrase)\n        mongo.connect()\n        mongo.chg_db()\n\n        self.assertTrue(mongo.db)\n\n    def test_no_database(self):\n\n        \"\"\"Function:  test_no_database\n\n        Description:  Test with no database passed.\n\n        Arguments:\n\n        \"\"\"\n\n        mongo = mongo_class.DB(\n            self.cfg.name, self.cfg.user, self.cfg.japd, host=self.cfg.host,\n            port=self.cfg.port, use_arg=self.cfg.use_arg,\n            ssl_client_ca=self.cfg.ssl_client_ca,\n            ssl_client_key=self.cfg.ssl_client_key,\n            ssl_client_cert=self.cfg.ssl_client_cert,\n            ssl_client_phrase=self.cfg.ssl_client_phrase)\n        mongo.connect()\n        mongo.chg_db()\n\n        self.assertEqual(mongo.db_name, \"test\")\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"test/integration/mongo_class/db_chg_db.py","file_name":"db_chg_db.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"385327968","text":"import pandas as pd\nimport json\n\nREPORTING_LEVELS = [\"A: Summary\", \"B: Business\", \"C: Coverage & Detail\",\n                    \"D: Format & Structure\", \"E: Attribution\", \"F: Technical Metadata\"]\n\ndef main():\n    df = pd.read_csv(\"config/weights/weightings_config.tsv\", sep=\"\\t\")\n    main_dict = {}\n    for section in REPORTING_LEVELS:\n        section_df = df[df[\"Section\"] == section]\n        section_df = section_df[[\"Attribute\", \"Weighting\"]]\n        sub_dict = {}\n        for r, v in section_df.iterrows():\n            sub_dict[v[\"Attribute\"]] = float(v[\"Weighting\"])\n        main_dict[section] = sub_dict\n    # Serializing json\n    json_object = json.dumps(main_dict, indent=4)\n\n    # Writing to weights.json\n    with open(\"config/weights/weights.json\", \"w\") as outfile:\n        outfile.write(json_object)\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"config/weights/create_weightings_json.py","file_name":"create_weightings_json.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"331264080","text":"import os\nimport jieba\nimport pandas as pd\nimport sys\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.cluster import KMeans\nfrom collections import Counter\nimport json\nimport logging\n\n\ndef get_logger():\n    \"\"\"\n    Create a logger instance\n    \"\"\"\n    formatter = logging.Formatter(\"%(asctime)s - %(message)s\")\n    logger = logging.getLogger(\"monitor\")\n    logger.setLevel(logging.INFO)\n\n    ch = logging.StreamHandler()\n    ch.setFormatter(formatter)\n    logger.addHandler(ch)\n    return logger\n\nlogger = get_logger()\n\n\ndef normalize(corpus):\n    texts = []\n    for text in corpus:\n        text = \" \".join(jieba.lcut(text))\n        texts.append(text)\n    return texts\n\n\ndef read_data(filename):\n    book_data = pd.read_csv(filename)\n    book_titles = book_data['title'].tolist()\n    book_content = book_data['content'].tolist()\n    norm_book_content = normalize(book_content)\n    return 
book_data,book_titles,norm_book_content\n\n\ndef get_features(book_content):\n    # Extract tf-idf features\n    vectorizer = TfidfVectorizer()\n    feature_matrix = vectorizer.fit_transform(book_content).astype(float)\n    feature_names = vectorizer.get_feature_names()\n    return feature_matrix,feature_names\n\n\ndef k_means(feature_matrix, num_clusters=10):\n    km = KMeans(n_clusters=num_clusters,\n                max_iter=10000)\n    km.fit(feature_matrix)\n    clusters = km.labels_\n    return km, clusters\n\n\ndef get_data(clustering_obj, book_data,\n             feature_names, num_clusters,\n             topn_features=10):\n    deta = {}\n    ordered_centroids = clustering_obj.cluster_centers_.argsort()[:, ::-1]\n    for i in range(num_clusters):\n        deta[i] = {}\n        deta[i]['cluster_num'] = i\n        key_features = [feature_names[index] for index\n                        in ordered_centroids[i, :topn_features]]\n        deta[i]['key_features'] = key_features\n        books = book_data[book_data['Cluster'] == i]['title'].values.tolist()\n        deta[i]['books'] = books\n    return deta\n\n\ndef write(item):\n    with open(\"recolist.json\", \"ab\") as f:\n        text = json.dumps(dict(item), ensure_ascii=False) + '\\n'\n        f.write(text.encode('utf-8'))\n    logger.info(\"writeOK\")\n\n\ndef main(argc, argv, envp):\n    book_data,book_titles, book_content = read_data('data/data.csv')\n    feature_matrix, feature_names = get_features(book_content)\n    print(feature_matrix[:1,1:10])\n    # km_obj, clusters = k_means(feature_matrix=feature_matrix, num_clusters=10)\n    # book_data['Cluster'] = clusters\n    # c = Counter(clusters)\n    # logger.info('c.items', c.items())\n    # item = get_data(clustering_obj=km_obj,\n    #                 book_data=book_data,\n    #                 feature_names=feature_names,\n    #                 num_clusters=10,\n    #                 topn_features=5)\n    # write(item)\n    return None\n\n\nif __name__ == '__main__':\n    sys.exit(main(len(sys.argv), sys.argv, os.environ))","sub_path":"NLP/文本聚类/文本分类.py","file_name":"文本分类.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"33329847","text":"import pandas as pd\nimport numpy as np\nimport json\ndata = pd.read_csv(\"data/movies.csv\")\n\ndef getData():\n    return data\n\nprofile = {}\ntrained = set()\ndef train_profile_random(n):\n    # draw random movie indices, re-drawing any index that has already been rated;\n    # indices are kept in a plain list so appended re-draws extend the loop\n    x = list((np.random.rand(n)*len(data)/2).astype(int))\n    for i in x:\n        if i in trained:\n            x.append(int(np.random.rand()*len(data)/2))\n        else:\n            trained.add(i)\n            train(data[\"genres\"][i], int(input(\"rate \" + data[\"title\"][i] + \" from 0 to 10: \")))\n    for genre in profile:\n        profile[genre] = sum(profile[genre])/len(profile[genre])\n\ndef train_profile_from_top(n):\n    for i in range(n):\n        train(data[\"genres\"][i], int(input(\"rate \" + data[\"title\"][i] + \" from 0 to 10: \")))\n    for genre in profile:\n        profile[genre] = sum(profile[genre])/len(profile[genre])\n\ndef train(genreset, rating):\n    genreset = json.loads(genreset)\n    for genre in genreset:\n        if genre[\"name\"] not in profile:\n            profile[genre[\"name\"]] = [rating]\n        else:\n            profile[genre[\"name\"]].append(rating)\n\ndef getProfile():\n    return profile\n\n# train_profile(5)\n# train_profile(5)\n# print(profile)\n\n# Judge from contour of colors which is mapped to a set of genres. 
These contours are generated from the movies.","sub_path":"movie_recommendation_service/DataGatherring.py","file_name":"DataGatherring.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"82317707","text":"# Copyright 2019 GreenWaves Technologies, SAS\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pylint: disable=line-too-long\n\nimport logging\n\nfrom graph.dim import PadDim\n\nfrom .code_block import CodeBlock\nfrom .kernel_parameters import (NO_ACTIVATION, NO_CONV, NO_POOL,\n ActivationATParam, ConvATParam,\n GroupedConvATParam, LinearATParam,\n MatrixAddATParam, PoolATParam, SoftMaxATParam,\n TwoDTransposeATParam, GenCtrl)\n\nLOG = logging.getLogger(\"nntool.\" + __name__)\n\nGEN_CONV_POOL_RELU = \"CNN_ConvolutionPoolReLU\"\nGEN_GROUPED_CONV_POOL_RELU = \"CNN_GroupedConvolutionPoolReLU\"\nGEN_POOL_RELU = \"CNN_PoolReLU\"\nGEN_LINEAR_RELU = \"CNN_LinearReLU\"\nGEN_SOFTMAX = \"CNN_SoftMax\"\nGEN_MATADD = \"CNN_MatAdd\"\nGEN_MATADDDYN = \"CNN_MatAddDynAdjust\"\nGEN_2D_TRANSPOSE = \"CNN_Mat2DTranspose\"\n\n\ndef gen_ctrl_call(api, op, val, code_block):\n if isinstance(val, str):\n val = 'AT_VAL(\"%s\")' % val\n elif isinstance(val, bool):\n val = val and 'AT_OPT_ON' or 'AT_OPT_OFF'\n elif isinstance(val, int):\n val = 'AT_VAL(%s)' % val\n else:\n raise ValueError()\n\n code_block.write('{}({}, {});', api, op, val)\n\n\ndef gen_kernel_ctrl(op, val, code_block):\n gen_ctrl_call('AT_SetKernelCtrl', op, val, code_block)\n\n\ndef gen_graph_ctrl(op, val, code_block):\n gen_ctrl_call('AT_SetGraphCtrl', op, val, code_block)\n\n# TCArgInfo(\"int *__restrict__\", \"JumboOut\", ARG_SCOPE_ARG, ARG_DIR_OUT, ARG_LOC_L2, ARG_LOC_L2)\n\n\ndef gen_output_decl(eparams, in_q, home_location, exec_location, code_block, allocate=False):\n code_block.write('TCArgInfo(\"{0} *__restrict__\", \"{1}\", {2}, ARG_DIR_OUT, {3}, {4}, 0)'\n .format(in_q.ctype, eparams.name, \"ARG_SCOPE_ARG_ALLOC\" if allocate else \"ARG_SCOPE_ARG\", home_location, exec_location))\n\n# TCArgInfo(\"short int *__restrict__\", \"JumboIn\", ARG_SCOPE_ARG, ARG_DIR_IN, ARG_LOC_L2, ARG_LOC_L2),\n\n\ndef gen_input_decl(eparams, out_q, home_location, exec_location, code_block, allocate=False):\n code_block.write('TCArgInfo(\"{0} *__restrict__\", \"{1}\", {2}, ARG_DIR_IN, {3}, {4}, 0)'\n .format(out_q.ctype, eparams.name, \"ARG_SCOPE_ARG_ALLOC\" if allocate else \"ARG_SCOPE_ARG\", home_location, exec_location))\n\n# TCArgInfo(\"short int *__restrict__\", \"JumboL1_Filter\", ARG_SCOPE_GLOBAL, ARG_DIR_CONSTIN, ARG_LOC_L2, ARG_LOC_L3_HFLASH),\n\n# ConstInfo(\n# \tchar *FileName,\t\t\t/**< Name of the file containing the list of values for this constant vector */\n# \tint Format,\t\t\t/**< Format of the list of value, 0: floating point, 1: fixed point */\n# \tint Size,\t\t\t/**< Size of the fixed point container */\n# \tint Fract\t\t\t/**< Fractional part size, Qx.Fract, note that x+Fract < Size */\n# \t);\n\n# ConstInit_T *ConstInfo(\n# \tchar 
*FileName,\t\t\t/**< Name of the file containing the list of values for this constant vector */\n# \tint Format,\t\t\t/**< Format of the list of value, 0: floating point, 1: fixed point */\n# \tint Binary,\t\t\t/**< If 1 file content is binary, if 0 file content is text */\n# \tint Size,\t\t\t/**< Size of the fixed point container */\n# \tint Fract\t\t\t/**< Fractional part size, Qx.Fract, note that x+Fract < Size */\n# \t);\n\n\nFMT_TYPES = {\n 'float': 0,\n 'fixed': 1\n}\n\n\ndef gen_str(name):\n return '\"%s\"' % name\n\n\ndef gen_const_info(fname, qtype, fmt=\"fixed\"):\n fmt = FMT_TYPES[fmt]\n return 'ConstInfo(\"{0}\", {1}, 1, {2}, {3})'.format(fname, fmt, qtype.bits, qtype.q)\n\n\ndef gen_global_decl(name, qtype, home_location, exec_location, code_block, const_info=None):\n if const_info is None:\n const_info = \"0\"\n code_block.write('TCArgInfo(\"{0} *__restrict__\", \"{1}\", ARG_SCOPE_GLOBAL, ARG_DIR_CONSTIN, {2}, {3}, {4})'\n .format(qtype.ctype, name, home_location, exec_location, const_info))\n\n\ndef gen_stack_decl(out_name, in_names, code_block):\n code_block.write('AddStackedTensors(\"{}\", {}, {});'\n .format(out_name, len(in_names), ', '.join([gen_str(in_name) for in_name in in_names])))\n\n\ndef gen_local_decl(eparams, qtype, location, code_block):\n code_block.write('TCArgInfo(\"{0} *__restrict__\", \"{1}\", ARG_SCOPE_LOCAL, ARG_DIR_INOUT, {2}, {2}, 0)'\n .format(qtype.ctype, eparams.name, location))\n\n\ndef gen_gnode_arg(direction, name):\n return 'GNodeArg({}, \"{}\", 0)'.format(direction, name)\n\n\ndef gen_g_arg(name):\n return 'GArg(\"{}\")'.format(name)\n\n\ndef gen_g_node_c_arg(name):\n return 'GNodeCArg(\"{}\")'.format(name)\n\n\ndef gen_imm_arg(symbol):\n return \"Imm({})\".format(symbol)\n\n# AddNode(\"L0_1__3_16_224_224_C3x3_S1_PM2x2_S2_H\", Bindings(5, GNodeArg(GNA_IN, \"DarknetIn\",0), GNodeArg(GNA_IN, \"DarknetL0_1_Filter\",0), GNodeArg(GNA_IN, \"DarknetL0_1_Bias\",0), GNodeArg(GNA_OUT, \"DarknetL0_1_Out\",0), Imm(Norm)));\n\n\ndef gen_at_bindings(name, binding_list, code_block):\n code_block.write('AddNode(\"{0}\", Bindings({1}, {2}));'\n .format(name, len(binding_list), \", \".join(binding_list)))\n\n\ndef gen_at_func_bindings(name, func_name, where, binding_list, code_block):\n code_block.write('AddCallToNode(\"{0}\", {1}, \"{2}\", Bindings({3}, {4}));'\n .format(name, where, func_name, len(binding_list), \", \".join(binding_list)))\n\n\ndef is_dp(conv_q):\n # if conv_q.calc_q == conv_q.acc_q and\\\n # conv_q.acc_q.bits > conv_q.out_qs[0].bits:\n # cop = \"KOP_CONV_DP\"\n # else:\n # cop = \"KOP_CONV\"\n return True\n\n\ndef gen_conv_at_params(params, conv_q, pad_compatibilities, do_dp=False):\n if params.is_depthwise_conv():\n assert params.multiplier == 1, \"Multiplier not supported\"\n assert not do_dp, \"No DP output for DW convolution\"\n cop = is_dp(conv_q) and \"KOP_CONV_DWDP\" or \"KOP_CONV_DW\"\n elif params.is_grouped_conv():\n cop = is_dp(conv_q) and \"KOP_CONV_DP\" or \"KOP_CONV\"\n return GroupedConvATParam(\n ConvOper=cop,\n GroupIn=params.groups,\n GroupOut=params.multiplier,\n Fcx=params.filter.w,\n Fcy=params.filter.h,\n Dcx=params.dilation.w,\n Dcy=params.dilation.h,\n Scx=params.stride.w,\n Scy=params.stride.h,\n ConvPad=params.has_at_zero_pad() and 1 or 0\n )\n else:\n cop = is_dp(conv_q) and \"KOP_CONV_DP\" or \"KOP_CONV\"\n\n pad_compatibilities.append(params.padding.pad_compatibility)\n return ConvATParam(\n ConvOper=cop,\n Fcx=params.filter.w,\n Fcy=params.filter.h,\n Dcx=params.dilation.w,\n Dcy=params.dilation.h,\n 
Scx=params.stride.w,\n Scy=params.stride.h,\n ConvPad=params.has_at_zero_pad() and 1 or 0\n )\n\n\ndef gen_pool_at_params(params, pad_compatibilities):\n if params.pool_type == \"average\":\n pop = \"KOP_AVGPOOL\"\n elif params.pool_type == \"max\":\n pop = \"KOP_MAXPOOL\"\n else:\n raise NotImplementedError()\n\n pad_compatibilities.append(params.padding.pad_compatibility)\n return PoolATParam(\n PoolOper=pop,\n Fpx=params.filter.w,\n Fpy=params.filter.h,\n Dpx=1,\n Dpy=1,\n Spx=params.stride.w,\n Spy=params.stride.h,\n PoolPad=params.has_at_zero_pad() and 1 or 0\n )\n\n\ndef at_bits(qtype):\n # 1: byte, 2: half word, 4: word\n if qtype.bits == 8:\n return 1\n if qtype.bits == 16:\n return 2\n if qtype.bits == 32:\n return 4\n raise NotImplementedError(\"unsupported number of bits\")\n\n\ndef at_bits_and_q(qtype):\n return \"{}, {}\".format(at_bits(qtype), qtype.q)\n\n\ndef gen_active_at_params(params):\n if params.activation == \"relu\":\n aop = \"KOP_RELU\"\n elif params.activation == \"relu6\":\n aop = \"KOP_RELUN\"\n elif params.activation == \"sigmoid\":\n aop = \"KOP_HSIGMOID\"\n else:\n raise NotImplementedError()\n return ActivationATParam(\n ReLUOper=aop\n )\n\n\ndef gen_linear_at_params(_):\n return LinearATParam(\n LinearOper=\"KOP_LINEAR\"\n )\n\n\ndef gen_softmax_at_params(_):\n return SoftMaxATParam(\n SoftMaxOper=\"KOP_SOFTMAX\"\n )\n\n\ndef gen_matrixadd_at_params(_):\n return MatrixAddATParam(\n MatrixAddOper=\"KOP_MATADD\"\n )\n\n\ndef gen_matrixadddyn_at_params(_):\n return MatrixAddATParam(\n MatrixAddOper=\"KOP_MATADD_DYNADJUST\"\n )\n\n\ndef gen_2d_transpose_at_params(params):\n size = params.transpose_size\n return TwoDTransposeATParam(\n TwoDTransposeOper=\"KOP_MAT2DTRANSPOSE\",\n Width=size[0],\n Height=size[1]\n )\n\n# extern void CNN_PoolReLU(\n# \tchar *Name,\n\n# \tCNN_GenControl_T *Ctrl,\n\n# \tint In_DataSize,\n# \tint Out_DataSize,\n\n# \tint In_InL3, // 1 if In comes from L3, 0 if it comes from L2\n# \tint Out_InL3,\n\n# \tint InFeat,\n# \tint OutFeat,\n# \tint Width,\n# \tint Height,\n\n# \tKernelOper_T PoolOper,\n# \tint Fpx,\n# \tint Fpy,\n# \tint Dpx,\n# \tint Dpy,\n# \tint Spx,\n# \tint Spy,\n# \tint PoolPad,\n\n# \tKernelOper_T ReLUOper\n# \t);\n\n# pylint: disable=too-many-arguments\n\n\ndef gen_at_pool_relu(code_block, name, in_size, out_size, in_dim,\n out_dim, at_pool, at_active, gen_ctrl=None):\n if gen_ctrl is None:\n gen_ctrl = \"0\"\n else:\n gen_ctrl = gen_ctrl.ctrl_name\n\n if at_pool.PoolOper == 'KOP_NONE':\n if in_dim.is_named and in_dim.has_keys(['c', 'w', 'h']):\n dims = [in_dim.c, in_dim.h, in_dim.w, in_dim.c]\n else:\n dims = in_dim.shape.copy()\n dims = dims + [1] * (4 - len(dims))\n\n if out_dim.is_named and out_dim.has_key('c'):\n dims[3] = out_dim.c\n else:\n dims[3] = dims[0]\n else:\n dims = [in_dim.c, in_dim.h, in_dim.w, out_dim.c]\n\n code_block.write('{}(\"{}\", {}, {}, {}, 1, 1, {}, {}, {}, {},',\n GEN_POOL_RELU, name, gen_ctrl, in_size, out_size,\n dims[0], dims[3], dims[2], dims[1])\n code_block.indent()\n code_block.write('{}, {}, {}, {}, {}, {}, {}, {}, {});',\n at_pool.PoolOper, at_pool.Fpx, at_pool.Fpy,\n at_pool.Dpx, at_pool.Dpy, at_pool.Spx, at_pool.Spy,\n at_pool.PoolPad, at_active.ReLUOper)\n code_block.deindent()\n\n# extern void CNN_ConvolutionPoolReLU(\n# \tchar *Name,\n\n# \tCNN_GenControl_T *Ctrl,\n\n# \tint In_DataSize,\n# \tint Filter_DataSize,\n# \tint Bias_DataSize,\n# \tint Out_DataSize,\n\n# \tint In_InL3, // 1 if In comes from L3, 0 if it comes from L2\n# \tint Filter_InL3,\n# \tint Bias_InL3,\n# \tint 
Out_InL3,\n\n# \tint InFeat,\n# \tint OutFeat,\n# \tint Width,\n# \tint Height,\n\n# \tKernelOper_T ConvOper,\n# \tint Fcx,\n# \tint Fcy,\n# \tint Dcx,\n# \tint Dcy,\n# \tint Scx,\n# \tint Scy,\n# \tint ConvPad,\n\n# \tKernelOper_T PoolOper,\n# \tint Fpx,\n# \tint Fpy,\n# \tint Dpx,\n# \tint Dpy,\n# \tint Spx,\n# \tint Spy,\n# \tint PoolPad,\n\n# \tKernelOper_T ReLUOper\n# \t);\n\n# pylint: disable=too-many-arguments\n\n\ndef gen_at_conv_pool_relu(code_block: CodeBlock, name, in_size, out_size,\n filt_size, bias_size, in_dim, out_dim,\n at_conv, at_pool, at_active, gen_ctrl=None):\n if gen_ctrl is None:\n gen_ctrl = \"0\"\n else:\n gen_ctrl = gen_ctrl.ctrl_name\n\n code_block.write('{}(\"{}\", {}, {}, {}, {}, {}, 1, 1, 1, 1, {}, {}, {}, {},',\n GEN_CONV_POOL_RELU, name, gen_ctrl,\n in_size, filt_size, bias_size, out_size,\n in_dim.c, out_dim.c, in_dim.w, in_dim.h)\n code_block.indent()\n code_block.write('{}, {}, {}, {}, {}, {}, {}, {},',\n at_conv.ConvOper, at_conv.Fcx, at_conv.Fcy,\n at_conv.Dcx, at_conv.Dcy, at_conv.Scx, at_conv.Scy,\n at_conv.ConvPad)\n code_block.write('{}, {}, {}, {}, {}, {}, {}, {}, {});',\n at_pool.PoolOper, at_pool.Fpx, at_pool.Fpy,\n at_pool.Dpx, at_pool.Dpy, at_pool.Spx, at_pool.Spy,\n at_pool.PoolPad, at_active.ReLUOper)\n code_block.deindent()\n\n# extern void CNN_ConvolutionPoolReLU(\n# \tchar *Name,\n\n# \tCNN_GenControl_T *Ctrl,\n\n# GroupIn: Size of the group for input features\n# GroupOut: Size of the group for output features\n\n# \tint In_DataSize,\n# \tint Filter_DataSize,\n# \tint Bias_DataSize,\n# \tint Out_DataSize,\n\n# \tint In_InL3, // 1 if In comes from L3, 0 if it comes from L2\n# \tint Filter_InL3,\n# \tint Bias_InL3,\n# \tint Out_InL3,\n\n# \tint InFeat,\n# \tint OutFeat,\n# \tint Width,\n# \tint Height,\n\n# \tKernelOper_T ConvOper,\n# \tint Fcx,\n# \tint Fcy,\n# \tint Dcx,\n# \tint Dcy,\n# \tint Scx,\n# \tint Scy,\n# \tint ConvPad,\n\n# \tKernelOper_T PoolOper,\n# \tint Fpx,\n# \tint Fpy,\n# \tint Dpx,\n# \tint Dpy,\n# \tint Spx,\n# \tint Spy,\n# \tint PoolPad,\n\n# \tKernelOper_T ReLUOper\n# \t);\n\n# pylint: disable=too-many-arguments\n\n\ndef gen_at_grouped_conv_pool_relu(code_block: CodeBlock, name, in_size, out_size,\n filt_size, bias_size, in_dim, out_dim,\n at_conv, at_pool, at_active, gen_ctrl=None):\n if gen_ctrl is None:\n gen_ctrl = \"0\"\n else:\n gen_ctrl = gen_ctrl.ctrl_name\n\n code_block.write('{}(\"{}\", {}, {}, {}, {}, {}, 1, 1, 1, 1, {}, {}, {}, {},',\n GEN_GROUPED_CONV_POOL_RELU, name, gen_ctrl,\n at_conv.GroupIn, at_conv.GroupOut,\n in_size, filt_size, bias_size, out_size,\n in_dim.c, out_dim.c, in_dim.w, in_dim.h)\n code_block.indent()\n code_block.write('{}, {}, {}, {}, {}, {}, {}, {},',\n at_conv.ConvOper, at_conv.Fcx, at_conv.Fcy,\n at_conv.Dcx, at_conv.Dcy, at_conv.Scx, at_conv.Scy,\n at_conv.ConvPad)\n code_block.write('{}, {}, {}, {}, {}, {}, {}, {}, {});',\n at_pool.PoolOper, at_pool.Fpx, at_pool.Fpy,\n at_pool.Dpx, at_pool.Dpy, at_pool.Spx, at_pool.Spy,\n at_pool.PoolPad, at_active.ReLUOper)\n code_block.deindent()\n\n# extern void CNN_LinearReLU(\n# char *Name,\n\n# \tCNN_GenControl_T *Ctrl,\n\n# int In_DataSize,\n# int Filter_DataSize,\n# int Bias_DataSize,\n# int Out_DataSize,\n\n# int In_InL3,\n# int Filter_InL3,\n# int Bias_InL3,\n# int Out_InL3,\n\n# int InDim,\n# int OutDim,\n\n# KernelOper_T LinearOper,\n# KernelOper_T ReLUOper\n# );\n\n# pylint: disable=too-many-arguments\n\n\ndef gen_at_linear_relu(code_block: CodeBlock, name, in_size, out_size,\n filt_size, bias_size, in_dim, out_dim,\n at_linear, 
at_active, gen_ctrl=None):\n if gen_ctrl is None:\n gen_ctrl = \"0\"\n else:\n gen_ctrl = gen_ctrl.ctrl_name\n\n code_block.write('{}(\"{}\", {}, {}, {}, {}, {}, 1, 1, 1, 1, {}, {},',\n GEN_LINEAR_RELU, name, gen_ctrl,\n in_size, filt_size, bias_size, out_size,\n in_dim.size(), out_dim.size())\n code_block.indent()\n code_block.write('{}, {});',\n at_linear.LinearOper, at_active.ReLUOper)\n code_block.deindent()\n\n# int CNN_Mat2DTranspose(\n# char *Name,\n# CNN_GenControl_T *Ctrl,\n# int InOut_DataSize,\n# int In_InL3,\n# int Out_InL3,\n# int Width,\n# int Height,\n# KernelOper_T AddMatOper\n# )\n\n\ndef gen_at_2d_transpose(code_block: CodeBlock, name, in_size, out_size,\n in_dim, at_transpose_params, gen_ctrl=None):\n if gen_ctrl is None:\n gen_ctrl = \"0\"\n else:\n raise NotImplementedError(\"genctrl is not yet implemented\")\n\n code_block.write('{}(\"{}\", {}, {}, 1, 1, {}, {}, {});',\n GEN_2D_TRANSPOSE, name, gen_ctrl, in_size,\n at_transpose_params.Width, at_transpose_params.Height,\n at_transpose_params.TwoDTransposeOper)\n\n\n# extern void CNN_SoftMax(\n# char *Name,\n# \tCNN_GenControl_T *Ctrl,\n# int In_DataSize,\n# int Out_DataSize,\n# int In_InL3,\n# int Out_InL3,\n# int Dim,\n# KernelOper_T SoftMaxOper\n# );\n\n# pylint: disable=too-many-arguments\n\n\ndef gen_at_softmax(code_block: CodeBlock, name, in_size, out_size,\n in_dim, at_softmax, gen_ctrl=None):\n if gen_ctrl is None:\n gen_ctrl = \"0\"\n else:\n raise NotImplementedError(\"genctrl is not yet implemented\")\n\n code_block.write('{}(\"{}\", {}, {}, {}, 1, 1, {}, {});',\n GEN_SOFTMAX, name, gen_ctrl,\n in_size, out_size, in_dim.size(), at_softmax.SoftMaxOper)\n\n# pylint: disable=too-many-arguments\n\n\ndef gen_at_matrixadd(code_block: CodeBlock, name, in_size1, in_size2, out_size,\n in_dim, out_dim, at_matrixadd, gen_ctrl=None):\n if gen_ctrl is None:\n gen_ctrl = \"0\"\n else:\n raise NotImplementedError(\"genctrl is not yet implemented\")\n\n code_block.write('{}(\"{}\", {}, {}, {}, {}, 1, 1, 1, {}, {}, {}, {}, {});',\n GEN_MATADD, name, gen_ctrl,\n in_size1, in_size2, out_size, in_dim.shape[0], out_dim.shape[0],\n in_dim.shape[1], in_dim.shape[2], at_matrixadd.MatrixAddOper)\n\n# pylint: disable=too-many-arguments\n\n\ndef gen_at_matrixadddyn(code_block: CodeBlock, name, in_size1, in_size2, out_size,\n inq1, inq2, outq, in_dim, out_dim, at_matrixadd, gen_ctrl=None):\n if gen_ctrl is None:\n gen_ctrl = \"0\"\n else:\n raise NotImplementedError(\"genctrl is not yet implemented\")\n\n code_block.write('{}(\"{}\", {}, {}, {}, {}, 1, 1, 1, {}, {}, {}, {}, {}, {}, {}, {});',\n GEN_MATADDDYN, name, gen_ctrl,\n in_size1, in_size2, out_size,\n inq1, inq2, outq,\n in_dim.shape[0], out_dim.shape[0],\n in_dim.shape[1], in_dim.shape[2], at_matrixadd.MatrixAddOper)\n\n# convolution followed by a pool and optional relu\n# pylint: disable=too-many-branches\n\n\ndef gen_conv_pool_relu(name, conv_params, conv_q, pool_params, pool_q, act_params, act_q, code_block=None, at_ver=2, gen_ctrl=None):\n\n if gen_ctrl is None:\n gen_ctrl = GenCtrl(None, cname=name)\n else:\n gen_ctrl.cname = name\n\n if at_ver < 3:\n fsize = at_bits\n else:\n fsize = at_bits_and_q\n in_q = filter_q = out_q = bias_q = None\n in_dim = out_dim = None\n pad_compatibilities = []\n if conv_params is not None:\n at_conv_params = gen_conv_at_params(conv_params, conv_q, pad_compatibilities)\n in_dim = conv_params.in_dims[0]\n out_dim = conv_params.out_dims[0]\n filter_q = conv_q.weights_q\n in_q = conv_q.in_qs[0]\n out_q = conv_q.out_qs[0]\n bias_q = 
conv_q.biases_q\n else:\n at_conv_params = NO_CONV\n\n if pool_params is not None:\n at_pool_params = gen_pool_at_params(pool_params, pad_compatibilities)\n if in_dim is None:\n in_dim = pool_params.in_dims[0]\n out_dim = pool_params.out_dims[0]\n if in_q is None:\n in_q = pool_q.in_qs[0]\n out_q = pool_q.out_qs[0]\n else:\n at_pool_params = NO_POOL\n\n if act_params is not None:\n at_act_params = gen_active_at_params(act_params)\n if in_dim is None:\n in_dim = act_params.in_dims[0]\n if out_dim is None:\n out_dim = act_params.out_dims[0]\n if in_q is None:\n in_q = act_q.in_qs[0]\n out_q = act_q.out_qs[0]\n if act_params.activation == \"relu6\" and out_q.q != 0:\n gen_ctrl.ReluN = 6 << out_q.q\n gen_ctrl.ReluNNoNorm = 1\n else:\n at_act_params = NO_ACTIVATION\n\n if code_block is None:\n code_block = CodeBlock()\n\n if pad_compatibilities:\n reduction = PadDim.pad_compatibility_reduce(*pad_compatibilities,\n \"convolution padding is not compatible with pool padding\")\n if not reduction[2]: # default is balanced pad left\n at_pad_ctrl = next(i for i, v in enumerate(reduction) if v)\n LOG.debug(\"%s: generating pad control block\", name)\n gen_ctrl.PadType = at_pad_ctrl\n\n if not gen_ctrl.is_unmodified:\n gen_ctrl.gen_ctrl_decl(code_block)\n\n if conv_params is None:\n if in_q.bits != out_q.bits:\n raise NotImplementedError(\"only homogenious operations are supported at present\")\n LOG.debug(\"%s: pool relu inq %s outq %s control block\", name, in_q, out_q)\n gen_at_pool_relu(code_block, name, fsize(in_q), fsize(out_q),\n in_dim, out_dim, at_pool_params, at_act_params, gen_ctrl=gen_ctrl)\n else:\n if isinstance(at_conv_params, ConvATParam):\n LOG.debug(\"%s: conv pool relu inq %s outq %s control block\", name, in_q, out_q)\n gen_at_conv_pool_relu(code_block, name, fsize(in_q), fsize(out_q),\n fsize(filter_q), fsize(bias_q),\n in_dim, out_dim, at_conv_params, at_pool_params,\n at_act_params, gen_ctrl=gen_ctrl)\n elif isinstance(at_conv_params, GroupedConvATParam):\n LOG.debug(\"%s: grouped conv pool relu inq %s outq %s control block\", name, in_q, out_q)\n gen_at_grouped_conv_pool_relu(code_block, name, fsize(in_q), fsize(out_q),\n fsize(filter_q), fsize(bias_q),\n in_dim, out_dim, at_conv_params, at_pool_params,\n at_act_params, gen_ctrl=gen_ctrl)\n else:\n raise ValueError('Internal error')\n\n return code_block\n\n\ndef gen_pool_relu(name, pool_params, pool_q, act_params, act_q, code_block=None, at_ver=2, gen_ctrl=None):\n if gen_ctrl is None:\n gen_ctrl = GenCtrl(None, cname=name)\n else:\n gen_ctrl.cname = name\n\n if at_ver < 3:\n fsize = at_bits\n else:\n fsize = at_bits_and_q\n in_q = out_q = None\n in_dim = out_dim = None\n pad_compatibilities = []\n\n if pool_params is not None:\n at_pool_params = gen_pool_at_params(pool_params, pad_compatibilities)\n if in_dim is None:\n in_dim = pool_params.in_dims[0]\n out_dim = pool_params.out_dims[0]\n if in_q is None:\n in_q = pool_q.in_qs[0]\n out_q = pool_q.out_qs[0]\n else:\n at_pool_params = NO_POOL\n\n if act_params is not None:\n at_act_params = gen_active_at_params(act_params)\n if in_dim is None:\n in_dim = act_params.in_dims[0]\n if out_dim is None:\n out_dim = act_params.out_dims[0]\n if in_q is None:\n in_q = act_q.in_qs[0]\n out_q = act_q.out_qs[0]\n if act_params.activation == \"relu6\" and out_q.q != 0:\n gen_ctrl.ReluN = 6 << out_q.q\n gen_ctrl.ReluNNoNorm = 1\n else:\n at_act_params = NO_ACTIVATION\n\n if code_block is None:\n code_block = CodeBlock()\n\n if pad_compatibilities:\n reduction = 
PadDim.pad_compatibility_reduce(*pad_compatibilities,\n \"convolution padding is not compatible with pool padding\")\n if not reduction[2]: # default is balanced pad left\n at_pad_ctrl = next(i for i, v in enumerate(reduction) if v)\n gen_ctrl.PadType = at_pad_ctrl\n\n if not gen_ctrl.is_unmodified:\n gen_ctrl.gen_ctrl_decl(code_block)\n\n if in_q.bits != out_q.bits:\n raise NotImplementedError(\"only homogenious operations are supported at present\")\n if pool_params is None:\n raise NotImplementedError(\"activation layer on its own is not implemented at present\")\n gen_at_pool_relu(code_block, name, fsize(in_q), fsize(out_q),\n in_dim, out_dim, at_pool_params, at_act_params, gen_ctrl=gen_ctrl)\n return code_block\n\n\n# linear followed by an optional relu\n# pylint: disable=too-many-branches\n\n\ndef gen_linear_relu(name, linear_params, linear_q, act_params, act_q, code_block=None, at_ver=2, gen_ctrl=None):\n if gen_ctrl is None:\n gen_ctrl = GenCtrl(None, cname=name)\n else:\n gen_ctrl.cname = name\n\n if at_ver < 3:\n fsize = at_bits\n else:\n fsize = at_bits_and_q\n assert linear_params is not None, \"linear should always be included\"\n at_linear_params = gen_linear_at_params(linear_params)\n in_dim = linear_params.in_dims[0]\n out_dim = linear_params.out_dims[0]\n filter_q = linear_q.weights_q\n in_q = linear_q.in_qs[0]\n out_q = linear_q.out_qs[0]\n bias_q = linear_q.biases_q\n\n if act_params is not None:\n at_act_params = gen_active_at_params(act_params)\n out_q = act_q.out_qs[0]\n if act_params.activation == \"relu6\" and out_q.q != 0:\n gen_ctrl.ReluN = 6 << out_q.q\n gen_ctrl.ReluNNoNorm = 1\n else:\n at_act_params = NO_ACTIVATION\n\n if code_block is None:\n code_block = CodeBlock()\n\n gen_at_linear_relu(code_block, name, fsize(in_q), fsize(out_q),\n fsize(filter_q), fsize(bias_q),\n in_dim, out_dim, at_linear_params, at_act_params)\n return code_block\n\n\ndef gen_2d_transpose(name, transpose_params, transpose_q, code_block=None):\n at_transpose_params = gen_2d_transpose_at_params(transpose_params)\n in_dim = transpose_params.in_dims[0]\n in_q = transpose_q.in_qs[0]\n out_q = transpose_q.out_qs[0]\n\n if code_block is None:\n code_block = CodeBlock()\n\n gen_at_2d_transpose(code_block, name, at_bits(in_q), at_bits(out_q),\n in_dim, at_transpose_params)\n return code_block\n\n\ndef gen_softmax(name, softmax_params, softmax_q, code_block=None):\n at_softmax_params = gen_softmax_at_params(softmax_params)\n in_dim = softmax_params.in_dims[0]\n in_q = softmax_q.in_qs[0]\n out_q = softmax_q.out_qs[0]\n\n if code_block is None:\n code_block = CodeBlock()\n\n gen_at_softmax(code_block, name, at_bits(in_q), at_bits(out_q),\n in_dim, at_softmax_params)\n return code_block\n\n\ndef gen_matrixadd(name, matrixadd_params, matrixadd_q, code_block=None):\n at_matrixadd_params = gen_matrixadd_at_params(matrixadd_params)\n in_dim = matrixadd_params.in_dims[0]\n out_dim = matrixadd_params.out_dims[0]\n in_q1 = matrixadd_q.in_qs[0]\n in_q2 = matrixadd_q.in_qs[1]\n out_q = matrixadd_q.out_qs[0]\n\n if code_block is None:\n code_block = CodeBlock()\n\n gen_at_matrixadd(code_block, name, at_bits(in_q1), at_bits(in_q2), at_bits(out_q),\n in_dim, out_dim, at_matrixadd_params)\n return code_block\n\n\ndef gen_matrixadddyn(name, matrixadd_params, matrixadd_q, code_block=None):\n at_matrixadd_params = gen_matrixadddyn_at_params(matrixadd_params)\n in_dim = matrixadd_params.in_dims[0]\n out_dim = matrixadd_params.out_dims[0]\n in_q1 = matrixadd_q.in_qs[0]\n in_q2 = matrixadd_q.in_qs[1]\n out_q = 
matrixadd_q.out_qs[0]\n\n    if code_block is None:\n        code_block = CodeBlock()\n\n    gen_at_matrixadddyn(code_block, name, at_bits(in_q1), at_bits(in_q2), at_bits(out_q),\n                        in_q1.q, in_q2.q, out_q.q, in_dim, out_dim, at_matrixadd_params)\n    return code_block\n","sub_path":"tools/nntool/generation/code_generators.py","file_name":"code_generators.py","file_ext":"py","file_size_in_byte":27827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"464993434","text":"#!/usr/bin/python\nimport cgi\nimport os\nimport sys\nfrom urllib.parse import parse_qs, urlsplit,SplitResult,urlunsplit\nfrom urllib.error import URLError\nimport urllib.request\n\nclass url(object):\n    def __init__(self,path):\n        self.parsed = urlsplit(path)\n    \n    def get_server(self):\n        \"\"\" Returns the host:port part of the url \"\"\"\n        return(self.parsed.netloc)\n    \n    def get_fullpath(self):\n        \"\"\" Returns the full path of the url with server and args removed \"\"\"\n        return(self.parsed.path)\n    \n    def get_filename(self):\n        \"\"\" Returns the end file name of the path only \"\"\"\n        fullpath = self.parsed.path\n        return(os.path.basename(fullpath))\n    \n    def get_directory(self):\n        \"\"\" Returns the full path minus the file name \"\"\"\n        fullpath = self.parsed.path\n        # there is an inconsistency in os.path in that the path does not contain\n        # a trailing / unless the path is / which makes it unpredictable so we'll \n        # have the root be empty string instead\n        dir = os.path.dirname(fullpath)\n        if (dir == '/'):\n            dir = ''\n        return(dir)\n    \n    def get_query_param(self,param_name):\n        \"\"\" Returns the value of the named parameter or empty string if it's missing \"\"\"\n        params = self.parsed.query\n        values = parse_qs(params)\n        if (param_name in values):\n            return(values[param_name])\n        else:\n            return('')\n\n    def replace_filename(self,newfilename):\n        \"\"\" Returns a new URL with the filename replaced by newfilename \"\"\"\n        newfullpath = self.get_directory() + '/' + newfilename\n        newurl = SplitResult(self.parsed[0],self.parsed[1],newfullpath,self.parsed[3],self.parsed[4])\n        return urlunsplit(newurl)\n\ndef get_command_list():\n    return([\n        'LoadProject',\n        'LoadSchematic',\n        'OpenSchematic',\n        'LoadSystemDiagram',\n        'OpenSystemDiagram',\n        'Simulate',\n        'RunScript',\n        'OpenUserFolder',\n        'TileVertical',\n        'TileHorizontal',\n        'CloseWindows',\n        'OpenEM',\n        'OpenGraph',\n        'OpenProject'\n    ])\n\ndef convert_command_to_URL(path):\n    \"\"\" Checks whether url is a command and fixes it \"\"\"\n    # Commands are somewhat in the form of URL's in that they need to be valid enough\n    # that the command gets to the AWAC but past that we can change the rest into anything\n    # we want.\n    #\n    # The current command syntax is:\n    # http://localhost:port/COMMAND?arguments\n    #\n    # which we process in the following way:\n    # 1) command is converted to abi/COMMAND.py\n\n    # Define the commands we'll fix up\n    command_list = get_command_list()\n    # split up the url\n    command_url = url(path)\n\n    # we need to lowercase the command so we don't run into case issues\n    command = command_url.get_filename()\n    if (command in command_list):\n        newcommand = \"abi/\" + command + '.py'\n\n        # add args back in\n        newurl = command_url.replace_filename(newcommand)\n        return(newurl)\n    else:\n        # not a command, do nothing\n        return(False)\n\ndef get_parameter(name):\n\n    # form being executed from web server\n    form = cgi.FieldStorage()\n    if (name in form and form[name].value != \"\"):\n        url = form[name].value\n\n        # BUG/FEATURE\n        # the CGI mechanism collapses 
multiple / to a single / (which would normally be on)\n # but it makes http:// into http:/ which is not legal. This adds the / back in.\n if (url.startswith('https:') and not url.startswith('https://')):\n url = url.replace('https:/','https://')\n if (url.startswith('http:') and not url.startswith('http://')):\n url = url.replace('http:/','http://') \n return(url)\n else:\n return False\n\n\ndef get_file(file_url, filetype):\n \"downloads a file to a temp directory and returns the name. can be emp, sch or sys\"\n file_extension = '.' + filetype\n if (file_url.startswith('http')):\n # web file request, we'll get the project\n filename=False\n try:\n (filename,headers) = urllib.request.urlretrieve(file_url)\n except URLError as e:\n print(e.reason)\n \n if not filename:\n return False\n \n # filename is the name of a temporary file. \n # on windows, the .emp extension may not be preserved\n if not filename.endswith(file_extension):\n os.rename(filename,filename+file_extension)\n filename = filename + file_extension\n \n # the .vin must have the same name\n ## just not sure we want to do this anymore\n #vinfilename = filename.replace('.emp','.vin')\n #vinurl = project_url.replace('.emp','.vin')\n #try:\n # (vname,vheaders) = urllib.request.urlretrieve(vinurl,vinfilename)\n #except URLError as e:\n # print(e.reason)\n return(filename)\n else:\n # local file request\n return(file_url)\n\n### These functions manage consistent formatting for the html log window\n### this output is checked for in tests so any edits here will require\n### changing the tests\n\ndef html_header():\n print(\"Content-type: text/html\\n\")\n print(\"In ABI Command\")\n print(\"\")\n print(\"
<html><head><title>Command debug log</title></head><body>\")\n\ndef html_footer():\n    print(\"</body></html>\")\n\ndef html_message(message):\n    print(\"<p>%s</p>\" % message)\n\ndef html_error(message):\n    print(\"<p><b>%s</b></p>\" % message)\n\ndef html_test(message):\n    print(\"EMULATING %s\" % message)\n\n# def html_test(message):\n    \n#     #dt = datetime.datetime.now()\n#     #fp.write(dt.strftime('%y-%m-%d %H:%M:%S '))\n#     print('<p>TESTMODE: %s</p>
' % message)\n\n","sub_path":"ghs/abi/urltools.py","file_name":"urltools.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"64827972","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport json\nimport os\nimport subprocess\nimport xmltodict\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.views import View\nfrom wapps.assembler.assembler_utils.repository_details import KwRepositoryDetails\nfrom wapps.assembler.assembler_utils.verify_file_contents import VerifyFileContents\nfrom utils.directory_traversal_utils import delete_dir\nfrom utils.git_utils import get_repository_name, check_url_is_a_valid_repo\nfrom utils.navigator_util import Navigator\nnav_obj = Navigator()\nREF_FILE = os.path.join(nav_obj.get_katana_dir(), \"wapps\", \"assembler\", \"static\", \"assembler\",\n \"base_templates\", \"empty.xml\")\n\n\nclass AssemblerView(View):\n\n def get(self, request):\n return render(request, 'assembler/assembler.html')\n\n\nclass ConfigurationFileOps(View):\n\n def post(self, request):\n template = request.POST.get('filepath')\n filename = \"Untitled\"\n final_data = {}\n\n if template == \"false\":\n vfd_obj = VerifyFileContents(REF_FILE, REF_FILE)\n else:\n vfd_obj = VerifyFileContents(template, REF_FILE)\n\n output = vfd_obj.verify_file()\n if output[\"status\"]:\n final_data = vfd_obj.data\n\n return JsonResponse({\"xml_contents\": final_data, \"filename\": filename, \"status\": output[\"status\"], \"message\": output[\"message\"]})\n\n\ndef check_repo_availability(request):\n available = True\n url = request.POST.get('url')\n repo_name = get_repository_name(url)\n drivers = []\n if not check_url_is_a_valid_repo(url):\n available = False\n else:\n temp_directory = os.path.join(nav_obj.get_katana_dir(), \"wapps\", \"assembler\", \".data\")\n kw_repo_obj = KwRepositoryDetails(url, temp_directory)\n drivers = kw_repo_obj.get_pd_names()\n if os.path.isdir(kw_repo_obj.repo_directory):\n delete_dir(kw_repo_obj.repo_directory)\n return JsonResponse({\"available\": available, \"repo_name\": repo_name, \"drivers\": drivers})\n\n\ndef check_ws_repo_availability(request):\n available = True\n url = request.POST.get('url')\n repo_name = get_repository_name(url)\n if not check_url_is_a_valid_repo(url):\n available = False\n return JsonResponse({\"available\": available, \"repo_name\": repo_name})\n\n\ndef check_tools_repo_availability(request):\n available = True\n url = request.POST.get('url')\n repo_name = get_repository_name(url)\n if not check_url_is_a_valid_repo(url):\n available = False\n return JsonResponse({\"available\": available, \"repo_name\": repo_name})\n\n\ndef save_warhorn_config_file(request):\n nav_obj = Navigator()\n directory = request.POST.get('directory')\n if directory == \"default\":\n directory = os.path.join(nav_obj.get_katana_dir(), \"wapps\", \"assembler\", \".data\")\n filepath = os.path.join(directory, request.POST.get('filename') + \".xml\")\n json_data = json.loads(request.POST.get('json_data'))\n json_data[\"data\"][\"warriorframework\"] = \"Test\"\n data = xmltodict.unparse(json_data)\n response = _save_file(filepath, data)\n\n return JsonResponse(response)\n\n\ndef save_and_run_warhorn_config_file(request):\n nav_obj = Navigator()\n directory = request.POST.get('directory')\n if directory == \"default\":\n directory = os.path.join(nav_obj.get_katana_dir(), \"wapps\", \"assembler\", \".data\")\n filepath = 
os.path.join(directory, request.POST.get('filename') + \".xml\")\n json_data = json.loads(request.POST.get('json_data'))\n json_data[\"data\"][\"warriorframework\"] = \"Test\"\n data = xmltodict.unparse(json_data)\n response = _save_file(filepath, data)\n nav_obj = Navigator()\n warhorn_dir = nav_obj.get_warhorn_dir()\n current_dir = os.getcwd()\n output = \"\"\n if response[\"saved\"]:\n os.chdir(warhorn_dir)\n output = subprocess.Popen([\"python\", \"warhorn.py\", filepath], stdout=subprocess.PIPE).communicate()[0]\n os.chdir(current_dir)\n os.remove(filepath)\n return JsonResponse({\"output\": output})\n\n\ndef _save_file(filepath, data):\n filepath = os.path.join(filepath)\n message = \"\"\n saved = True\n try:\n with open(filepath, 'w') as f:\n f.write(data)\n except Exception as e:\n saved = False\n message = e\n return {\"saved\": saved, \"message\": message}\n\n\ndef get_data_directory(request):\n nav_obj = Navigator()\n directory = os.path.join(nav_obj.get_katana_dir(), \"wapps\", \"assembler\", \".data\")\n return JsonResponse({\"data_directory\": directory})\n","sub_path":"katana/wapps/assembler/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"388137880","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport json\nimport time\nimport grpc\nfrom google.protobuf import empty_pb2\nfrom google.protobuf import json_format\n\nexe_path = os.path.realpath(sys.argv[0])\nbin_path = os.path.dirname(exe_path)\nlib_path = os.path.realpath(bin_path + '../lib/python')\nsys.path.append(lib_path)\nsys.path.insert(0, '/home/msl/maum/lib/python')\nprint(sys.path)\nfrom maum.brain.nlp import nlp_pb2_grpc\nfrom maum.brain.nlp import nlp_pb2\nfrom maum.common import lang_pb2\nfrom common.config import Config\n\nremote = \"10.122.64.83:9823\"\nchannel = grpc.insecure_channel(remote)\nstub = nlp_pb2_grpc.NaturalLanguageProcessingServiceStub(channel)\n\nclass NLPAnalyzer:\n stub = None\n\n def __init__(self):\n #self.conf = Config()\n #self.conf.init('minds-ta.conf')\n #self.remote = \"localhost:9823\" + self.conf.get(\"minds-ta.nlp.3.kor.port\")\n self.morp_result = list()\n self.ner_result = list()\n self.sentence_result = list()\n self.morp_result_list = list()\n\n def __analyze__(self, text):\n # type: (object) -> object\n in_text = nlp_pb2.InputText()\n in_text.text = text\n in_text.lang = lang_pb2.kor\n in_text.split_sentence = True\n in_text.use_tokenizer = False\n\n message_result = stub.Analyze(in_text)\n\n morp_result = list()\n ner_result = list()\n sentence_result = list()\n sentence_morp_dict = ()\n for i in range(len(message_result.sentences)):\n sentence = message_result.sentences[i].text\n sentence_result.append(sentence.strip())\n morp_analysis = message_result.sentences[i].morps\n morp = \"\"\n for j in range(len(morp_analysis)):\n morp = morp_analysis[j].lemma + \"/\" + morp_analysis[j].type\n morp_result.append(morp.strip())\n self.sentence_morp_dict[sentence] = morp_result\n self.morp_result_list.append(morp_result)\n\n ner_analysis = message_result.sentences[i].nes\n\n ner = \"\"\n for j in range(len(ner_analysis)):\n ner = ner_analysis[j].text + \"/\" + ner_analysis[j].type\n if not ner:\n continue\n else:\n ner = ner.encode('utf-8').strip()\n ner_result.append(ner.strip())\n\n self.morp_result = morp_result\n self.ner_result = ner_result\n self.sentence_result = sentence_result\n\n def __extract_sentence__(self, 
text):\n in_text = nlp_pb2.InputText()\n in_text.text = text\n in_text.lang = lang_pb2.kor\n in_text.split_sentence = True\n in_text.use_tokenizer = False\n\n message_result = stub.Analyze(in_text)\n sentence_result = list()\n\n for i in range(len(message_result.sentences)):\n sentence = message_result.sentences[i].text\n sentence_result.append(sentence.strip())\n\n return sentence_result\n\n def __extract_morp__(self, text):\n in_text = nlp_pb2.InputText()\n in_text.text = text\n in_text.lang = lang_pb2.kor\n in_text.split_sentence = True\n in_text.use_tokenizer = False\n\n message_result = stub.Analyze(in_text)\n\n morp_result = list()\n for i in range(len(message_result.sentences)): # 찬란하/VA ㄴ/ETM 유산/NNG ,/SP 시티헌터/NNG ,/SP 주군/NNG 의/JKG 태양/NNG ,/SP 닥터/NNG 이방/NNG 이/VCP ㄴ/ETM 등/NNB\n morp_analysis = message_result.sentences[i].morps\n morp = \"\"\n for j in range(len(morp_analysis)):\n morp = morp + \" \" + morp_analysis[j].lemma + \"/\" + morp_analysis[j].type\n morp = morp.encode('utf-8').strip()\n morp_result.append(morp) # 찬란하/VA ㄴ/ETM 유산/NNG ,/SP 시티헌터/NNG ,/SP 주군/NNG 의/JKG 태양/NNG ,/SP 닥터/NNG 이방/NNG 이/VCP ㄴ/ETM 등/NNB\n\n return morp_result\n\n def __extract_ner__(self, text):\n in_text = nlp_pb2.InputText()\n in_text.text = text\n in_text.lang = lang_pb2.kor\n in_text.split_sentence = True\n in_text.use_tokenizer = False\n\n message_result = stub.Analyze(in_text)\n\n ner_result = list()\n for i in range(len(message_result.sentences)):\n ner_analysis = message_result.sentences[i].nes\n for j in range(len(ner_analysis)):\n ner = ner_analysis[j].text + \"/\" + ner_analysis[j].type\n if not ner:\n continue\n else:\n ner = ner.encode('utf-8').strip()\n ner_result.append(ner.strip())\n\n return ner_result\n\n def __extract_sentence_morp__(self, text):\n in_text = nlp_pb2.InputText()\n in_text.text = text\n in_text.lang = lang_pb2.kor\n in_text.split_sentence = True\n in_text.use_tokenizer = False\n\n message_result = stub.Analyze(in_text)\n\n sentence_morp_dict = dict()\n for i in range(len(message_result.sentences)):\n sentence = message_result.sentences[i].text\n morp_analysis = message_result.sentences[i].morps\n\n morp = \"\"\n for j in range(len(morp_analysis)):\n morp = morp_analysis[j].lemma + \"/\" + morp_analysis[j].type\n morp = morp.encode('utf-8').strip()\n\n sentence_morp_dict[morp] = dict\n\n return sentence_morp_dict\n\n def __extract_sentence_morp__(self, text):\n in_text = nlp_pb2.InputText()\n in_text.text = text\n in_text.lang = lang_pb2.kor\n in_text.split_sentence = True\n in_text.use_tokenizer = False\n\n message_result = stub.Analyze(in_text)\n sentence_morp_dict = dict()\n for i in range(len(message_result.sentences)):\n sentence = message_result.sentences[i].text\n morp_analysis = message_result.sentences[i].morps\n\n morp = \"\"\n for j in range(len(morp_analysis)):\n morp = morp + \" \" + morp_analysis[j].lemma + \"/\" + morp_analysis[j].type\n morp = morp.encode('utf-8').strip()\n sentence_morp_dict[morp] = sentence\n\n return sentence_morp_dict\n\n def __extract_sentence_morp__(self, text, flag):\n in_text = nlp_pb2.InputText()\n in_text.text = text\n in_text.lang = lang_pb2.kor\n in_text.split_sentence = True\n in_text.use_tokenizer = False\n\n message_result = stub.Analyze(in_text)\n sentence_morp_dict = dict()\n morp_result = list()\n for i in range(len(message_result.sentences)):\n sentence = message_result.sentences[i].text.strip()\n morp_analysis = message_result.sentences[i].morps\n\n morp = \"\"\n for j in range(len(morp_analysis)):\n morp = morp + \" \" 
+ morp_analysis[j].lemma + \"/\" + morp_analysis[j].type\n morp = morp.encode('utf-8').strip()\n morp_result.append(morp)\n sentence_morp_dict[morp] = sentence\n\n return sentence_morp_dict, morp_result\n\n def __extract_dependency_parser__(self, text):\n in_text = nlp_pb2.InputText()\n in_text.text = text\n in_text.lang = lang_pb2.kor\n in_text.split_sentence = True\n in_text.use_tokenizer = False\n\n message_result = stub.Analyze(in_text)\n\n dependency_result_list = list()\n for i in range(len(message_result.sentences)):\n dependency_result = message_result.sentences[i].dependency_parsers\n\n for j in range(len(dependency_result)):\n dependency_dict = dict()\n dependency_dict[\"id\"] = dependency_result[j].seq\n dependency_dict[\"text\"] = dependency_result[j].text\n dependency_dict[\"head\"] = dependency_result[j].head\n dependency_dict[\"label\"] = dependency_result[j].label\n dependency_dict[\"weight\"] = dependency_result[j].weight\n if len(dependency_result[j].mods) == 0:\n dependency_dict[\"mods\"] = list()\n else:\n mods_list = list()\n for k in range(len(dependency_result[j].mods)):\n mods_list.append(dependency_result[j].mods[k])\n dependency_dict[\"mods\"] = mods_list\n\n dependency_result_list.append(dependency_dict)\n\n return dependency_result_list\n\n def get_dependency_parser_result(self, text):\n return self.__extract_dependency_parser__(text)\n\n def get_result_sentence_list(self, text):\n return self.__extract_sentence__(text)\n\n def get_result_morp_list(self, text):\n return_list = list()\n for temp in self.__extract_morp__(text): # 찬란하/VA ㄴ/ETM 유산/NNG ,/SP 시티헌터/NNG ,/SP 주군/NNG 의/JKG 태양/NNG ,/SP 닥터/NNG 이방/NNG 이/VCP ㄴ/ETM 등/NNB\n print (temp)\n tokens = temp.decode('utf-8').split()\n for token in tokens: # 찬란하/VA\n item = token.split(\"/\")\n if len(item) > 2:\n item = [\"/\"] + [item[-1]]\n return_list.append(\"/\".join([item[0], item[1].lower()])) # ['찬란하/va']\n return return_list\n\n def get_result_morp_str(self, text):\n result = \"\"\n for temp in self.__extract_morp__(text):\n result = result + \" \" + temp.decode('utf-8')\n return result.strip()\n\n def get_result_ner_list(self, text):\n return self.__extract_ner__()\n\n def get_result_sentence_morp_dict(self, text, flag):\n if flag == \"sentence_morp\":\n return self.__extract_sentence_morp__(text)\n elif flag == \"both\":\n return self.__extract_sentence_morp__(text, flag)\n\n def get_tree_result(self, content, original_token=False, sentence_list=False):\n ret = self.get_all_result(content)\n final_list = list()\n word_list = list()\n original_sent_list = list()\n for sent in ret.sentences:\n original_sent = list()\n if original_token:\n for word in sent.words:\n word_list.append(word.text)\n if sentence_list:\n for word in sent.words:\n original_sent.append(word.text)\n original_sent_list.append(\" \".join(original_sent))\n sent_list = list()\n for morph in sent.morph_evals: # target: 찬란한 # result: 찬란하/VA+ㄴ/ETM # m_end: 1\n tokens = morph.result.replace(\"+\", \"\\t\").replace(\"\\t/SW\", \"+/SW\").split(\"\\t\") # ['찬란하/VA', 'ㄴ/ETM']\n item_list = list()\n for token in tokens: # 찬란하/VA\n item = token.split(\"/\") # ['찬란하', 'VA']\n if len(item) > 2:\n item = [\"/\"] + [item[-1]]\n item_list.append(\"/\".join([item[0], item[1].lower()])) # ['찬란하/va'] # ['찬란하/va', 'ㄴ/etm']\n sent_list.append(item_list)\n final_list.append(sent_list)\n if original_token and sentence_list:\n return final_list, word_list, original_sent_list\n elif original_token and not sentence_list:\n return final_list, word_list\n elif not 
original_token and sentence_list:\n return final_list, original_sent_list\n else:\n return final_list\n\n def get_all_result(self, text):\n in_text = nlp_pb2.InputText()\n in_text.text = text\n in_text.lang = lang_pb2.kor\n in_text.split_sentence = True\n in_text.use_tokenizer = False\n in_text.level = 1\n in_text.keyword_frequency_level = 0\n ret = stub.Analyze(in_text)\n return ret\n\nif __name__ == \"__main__\":\n nlp_analyze = NLPAnalyzer()\n\n content = \"\"\"사립학교법인이 해산되는 경우들을 알려줘\"\"\"\n # content = \"\"\"MBC TV '밥상 차리는 남자'도 메인 연출자가 파업에 동참하면서 촬영이 중단됐다가 지난 14일 재개됐는데 역시 같은 이유다. '밥상 차리는 남자'는 파업 직전인 지난 2일 시작했기 때문에 파업으로 결방되면 방송을 시작하자마자 중단하는 꼴이 된다. 드라마로서는 첫 방송이 연기되는 것보다 방송 도중 결방되는 게 더 큰 타격이다. 흐름이 끊겨버려 안 하느니만 못한 상황이 되기 때문이다. 그로 인해 메인 연출자의 파업 참여 부담이 더 커진다. 반면 예능 프로그램의 경우는 애초 출연자와의 출연 계약 기간이라는 것이 없어 파업으로 결방돼도 계약상 문제가 >발생하는 경우가 거의 없고, 내용도 드라마처럼 연속성이 있는 게 아니라 결방의 부담이 드라마에 비해서는 현저히 적다. MBC노조 관계자는 \"아직 확정적으로 말하긴 힘들지만 앞으로 시작하는 드>라마의 경우는 대부분 제때 방송을 시작하기 쉽지 않을 것\"이라며 \"프로그램마다 사정이 다 복잡한 것은 사실이지만 파업이 길어지면 계획된 일정대로 가기 어렵다\"고 전했다.\"\"\"\n morph_content = nlp_analyze.get_tree_result(content)\n # morph_content = nlp_analyze.get_dependency_parser_result(content)\n #dp_content = nlp_analyze.get_dependency_parser_result(content)\n print (morph_content)\n\n\n #with open(\"tmp.json\", \"w\") as f:\n # json.dump(dp_content, f, sort_keys=True, indent=4)\n #print(json.dumps(dp_content, sort_keys=True, indent=4))\n #print(dp_content)\n","sub_path":"mrc_utils/morp_analyze_my.py","file_name":"morp_analyze_my.py","file_ext":"py","file_size_in_byte":13607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"458403998","text":"from .gene import Gene\n\n\nclass Population(object):\n\n def __init__(self, requirement, analysts):\n self.analysts = analysts\n self.requirement = requirement\n self.genes = self.generate_genes()\n\n def generate_genes(self):\n return [\n Gene(\n analyst=analyst,\n requirement=self.requirement\n ) for analyst in self.analysts\n ]\n","sub_path":"requirements-allocation/allocation/population.py","file_name":"population.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"469515245","text":"import cv2\nimport time\nimport os\nNAME='s3'\npath='train_data/{}'.format(NAME)\ncv2.namedWindow(\"camera\", 1)\nif not os.path.exists(path):\n os.makedirs(path)\ncap = cv2.VideoCapture(0)\ni = 0\nwhile (cap.isOpened()):\n # считываем кадр\n ret,img= cap.read()\n if ret == True:\n cv2.imshow(\"camera\", img)\n button = cv2.waitKey(1) & 0x77\n if button == ord('s'):\n cv2.imwrite(path+'/'+\"face-\" + str(i)+'.jpg', img)\n i=i+1\n if cv2.waitKey(10) == 27:\n break","sub_path":"lesson4.2.py","file_name":"lesson4.2.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"297656077","text":"class Bird:\r\n\twings = 2\r\n\r\n\t@classmethod\r\n\tdef fly(cls, name):\r\n\t\tprint(f'{name} flying with {cls.wings} wings...')\r\n\r\nBird.fly('Eagle')\r\nBird.fly('Parot')\r\nprint()\r\n\r\n# =========================\r\nclass Test:\r\n\tcount = 0\r\n\r\n\tdef __init__(self):\r\n\t\tTest.count += 1\r\n\r\n\t@classmethod\r\n\tdef get_number_of_objects(cls):\r\n\t\treturn cls.count\r\n\r\n\tdef delete_obj(self):\r\n\t\tdel self\r\n\t\tTest.count -= 1\r\n\r\nt = Test()\r\nt2 = Test()\r\nt3 = Test()\r\nt4 = 
Test()\r\nt.delete_obj()\r\nt4.delete_obj()\r\nprint(Test.get_number_of_objects())","sub_path":"older/object-oriented programming/23_class_method.py","file_name":"23_class_method.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"412865498","text":"import datetime\nfrom odoo import models, fields, api, exceptions\n\n\n\nclass ClinicReportWizard(models.TransientModel):\n _name = 'clinic.wizard'\n _description = \"Create Medical Report\"\n\n patient_id = fields.Char(string='Student Number', readonly=True)\n\n name = fields.Char(string='full name')\n\n student_ids = fields.Many2one(\n 'education.physical', string='Student Name')\n\n gender = fields.Char(string=\"Gender\")\n\n dob = fields.Date(string='Date Of Birth')\n age = fields.Char(string='Age')\n date = fields.Date(string='Date Requested')\n phone = fields.Char(string=\"Phone\")\n email = fields.Char(string=\"Email\")\n\n nationality = fields.Char(string='nationality')\n\n religion = fields.Char(string='religion')\n program = fields.Char(string='program')\n result = fields.Selection([\n ('لائق طبيا', 'لائق طبيا'),\n ('غير لائق طبيا', 'غير لائق طبيا'),\n ('أسباب أخرى', 'أسباب أخرى'),\n ], string=\"Result\")\n\n result_date = fields.Char(string=\"Result Date\", default=lambda self: datetime.datetime.today().strftime('%Y-%m-%d'))\n\n\n\n diagonis = fields.Text(string=\"Medical Report\")\n doctor_name = fields.Char(string=\"Doctor Name\", readonly=True, default=lambda self: self.env.user.name)\n\n #Function Wizards\n\n @api.onchange('student_ids')\n def create_wizard(self):\n filtered_b_ids = self.env['education.physical'].search([('id', '=', int(self.student_ids.id))])\n if filtered_b_ids:\n self.dob = filtered_b_ids.dob\n self.nationality = filtered_b_ids.nationality\n self.patient_id = filtered_b_ids.patient_id\n self.religion = filtered_b_ids.religion\n self.program = filtered_b_ids.program\n self.gender = filtered_b_ids.gender\n\n\n\n # Func send to registeration\n\n def create_medical_report(self):\n if (self.result and self.diagonis):\n pass\n else:\n raise exceptions.UserError(_('All Data must be completed'))\n\n result = self.env['student.registrar'].search([('form_number', '=', self.patient_id)])\n if result:\n for rec in result:\n rec.update(\n {\n 'result': self.result,\n 'doctor_comment': self.diagonis,\n 'result_data': self.result_date,\n 'doctor_name': self.doctor_name,\n\n }\n )\n else:\n\n self.env['student.registrar'].create({\n 'result_data': self.result_date,\n 'result': self.result,\n 'doctor_comment': self.diagonis,\n 'doctor_name': self.doctor_name,\n })\n\n\n # Pending\n # def lanch_physical_wizard(self):\n # self.env['napata.physical'].create({\n #\n # 'name': self.student_ids.name,\n # 'gender': self.gender,\n # 'dob': self.dob,\n # 'program': self.program,\n # 'nationality': self.nationality,\n # 'religion': self.religion,\n #\n # })\n\n\n\n\n\n","sub_path":"education_clinic/wizards/clinicreportwizard.py","file_name":"clinicreportwizard.py","file_ext":"py","file_size_in_byte":3151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"37689253","text":"from abc import ABCMeta,abstractmethod\r\nimport math\r\nimport numpy as np\r\nfrom scipy.stats import norm\r\nclass Clasificador:\r\n\r\n # Clase abstracta\r\n __metaclass__ = ABCMeta\r\n\r\n # Metodos abstractos que se implementan en casa clasificador concreto\r\n @abstractmethod\r\n # datosTrain: matriz numpy con los 
datos de entrenamiento\r\n # atributosDiscretos: array bool con la indicatriz de los atributos nominales\r\n # diccionario: array de diccionarios de la estructura Datos utilizados para la codificacion de variables discretas\r\n def entrenamiento(self,datosTrain,atributosDiscretos,diccionario):\r\n pass\r\n\r\n\r\n @abstractmethod\r\n # devuelve un numpy array con las predicciones\r\n def clasifica(self,datosTest,atributosDiscretos,diccionario):\r\n pass\r\n\r\n\r\n # Obtiene el numero de aciertos y errores para calcular la tasa de fallo\r\n # Entendemos que 'datos' y 'pred' son de la misma longitud para poder\r\n # realizar esta funcion sin controles\r\n # Asumiendo que tenemos dos clases (que tendran valores 0 y 1 despues de\r\n # haber sido 'traducidas' por el diccionario), damos al usuario la opcion\r\n # de solicitar la matriz de confusion para el analisis ROC\r\n # Denotamos a la clase 0 como negative, y 1 como positive\r\n def error(self,datos,pred, ROC = False):\r\n numErr = 0\r\n numOk = 0\r\n rowLen = len(datos[0])\r\n TN = 0\r\n TP = 0\r\n FN = 0\r\n FP = 0\r\n # Aqui se compara la prediccion (pred) con las clases reales y se calcula el error\r\n # Suponiendo que pred es una lista con las predicciones, y que por defecto\r\n # será de la misma longitud que el numero de filas de 'datos'\r\n # comparamos cada entrada de 'pred' con la ultima entrada de cada fila\r\n # de 'datos' (que es la clase real)\r\n for i in range(0,len(pred)):\r\n if pred[i] == datos[i][rowLen-1]:\r\n if pred[i]==0 and ROC: # True Negative\r\n TN += 1\r\n elif ROC: # True Positive\r\n TP += 1\r\n numOk = numOk + 1\r\n else:\r\n if pred[i] == 0 and ROC: # False Negative\r\n FN +=1\r\n elif ROC: # False positive\r\n FP +=1\r\n numErr = numErr + 1\r\n\r\n confMatrix = np.array([[TP, FP],[FN, TN]])\r\n # devuelve un numero entre 0,1 que representa el error\r\n error = numErr/(numErr + numOk)\r\n return (error, confMatrix)\r\n\r\n\r\n\r\n # Realiza una clasificacion utilizando una estrategia de particionado determinada\r\n def validacion(self,particionado,dataset,clasificador,seed=None, ROC = False):\r\n\r\n # Creamos las particiones siguiendo la estrategia llamando a particionado.creaParticiones\r\n listaParticiones = particionado.creaParticiones(dataset.datos, seed)\r\n # Para validacion cruzada: en el bucle hasta nv entrenamos el clasificador con la particion de train i\r\n # y obtenemos el error en la particion de test i\r\n # Inicializamos error a 0\r\n error = 0.0\r\n mConf = np.array([[0,0],[0,0]])\r\n for particion in listaParticiones:\r\n # Extraemos datos para el entrenamiento, y los de test\r\n trainData = dataset.extraeDatos(particion.indicesTrain)\r\n testData = dataset.extraeDatos(particion.indicesTest)\r\n # Entrenamos datos (es decir, generamos tablas de Naive Bayes)\r\n clasificador.entrenamiento(trainData, dataset.nominalAtributos, dataset.diccionarios)\r\n # Clasificamos usando las tablas que ya han sido asignadas\r\n pred = clasificador.clasifica(testData, dataset.nominalAtributos, dataset.diccionarios)\r\n # Sumamos el error de esta iteracion al error total\r\n err, mConfusion = self.error(testData, pred, ROC = True)\r\n error += err\r\n if ROC:\r\n mConf += mConfusion\r\n # Calculamos error medio a lo largo de todas las particiones.\r\n # En el caso de validacion simple, esto sera solo una particion\r\n error /= len(listaParticiones)\r\n\r\n if not ROC:\r\n return error\r\n else:\r\n mConf = mConf/len(listaParticiones)\r\n return error, mConf\r\n\r\n def 
validacionROC(self,particionado,dataset,clasificador,seed=None, alpha = 0.5):\r\n listaParticiones = particionado.creaParticiones(dataset.datos, seed)\r\n # Inicializamos error a 0\r\n error = 0.0\r\n mConf = np.array([[0,0],[0,0]])\r\n for particion in listaParticiones:\r\n # Extraemos datos para el entrenamiento, y los de test\r\n trainData = dataset.extraeDatos(particion.indicesTrain)\r\n testData = dataset.extraeDatos(particion.indicesTest)\r\n # Entrenamos datos (es decir, generamos tablas de Naive Bayes)\r\n clasificador.entrenamiento(trainData, dataset.nominalAtributos, dataset.diccionarios, self.laplace)\r\n # Clasificamos usando las tablas que ya han sido asignadas\r\n pred = clasificador.clasificaROC(testData, dataset.nominalAtributos, dataset.diccionarios, alpha)\r\n # Sumamos el error de esta iteracion al error total\r\n err, mConfusion = self.error(testData, pred, ROC = True)\r\n error += err\r\n mConf += mConfusion\r\n # Calculamos error medio a lo largo de todas las particiones.\r\n # En el caso de validacion simple, esto sera solo una particion\r\n error /= len(listaParticiones)\r\n mConf = mConf/len(listaParticiones)\r\n return error, mConf\r\n\r\n\r\n##############################################################################\r\n##############################################################################\r\n\r\n##############################################################################\r\n\r\nclass ClasificadorNaiveBayes(Clasificador):\r\n def __init__(self, laplace = False):\r\n self.laplace = laplace\r\n\r\n def entrenamiento(self,datosTrain,atributosDiscretos,diccionario):\r\n # There are len - 1 attributes, since last element in atributosDiscretos, the class, does not correspond to a proper attribute\r\n nAtributos = len(atributosDiscretos)-1\r\n # We extract a set with all classes in the data\r\n classes = diccionario[-1].values()\r\n # The following loop will build the NB tables\r\n # We create a list where the i-th element of the list is associated to the i-th attribute,\r\n # and for each attribute a dictionary with classes as keys is created\r\n # If the attribute is continuous, then for each class key the appropriate normal distribution is assigned as a value\r\n # Otherwise, for each class key a new dictionary is created, which has the attribute values as keys,\r\n # and for each value key,the number of data elements with the current class and current value is assigned as value\r\n\r\n\r\n self.prioris = prioris(datosTrain)\r\n attrTables = []\r\n for i in range(nAtributos):\r\n # For each attribute, a dictionary with all classes as values\r\n classesTable = dict()\r\n\r\n laplaceNeedsToBeApplied = False\r\n for clase in classes:\r\n # We extract a set with all attribute values for the i-th attribute\r\n attrValues = diccionario[i].values()\r\n # If i-th attribute discrete:\r\n # For each class, a dictionary with the i-th attribute values as keys\r\n classesTable[clase] = dict()\r\n if(atributosDiscretos[i]):\r\n for value in attrValues:\r\n # We count the number of elements of class clase out of the elements\r\n # where the i-th attribute has value value\r\n count = np.sum((datosTrain[:,i]==value ) & (datosTrain[:,-1]==clase))\r\n if count == 0 and self.laplace == True:\r\n laplaceNeedsToBeApplied = True\r\n #print('Laplace correction will be applied because no data with class ', clase, ' and ', i, 'th attribute with value ', value, ' was encountered in ', datosTrain)\r\n classesTable[clase][value] = count\r\n\r\n # If i-th attribute continuous:\r\n 
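# A minimal sketch of the continuous branch below (the numbers are hypothetical):\r\n                # each class keeps a frozen Gaussian fitted to the attribute column, and the\r\n                # class-conditional likelihood is read back later in clasifica() via .pdf():\r\n                #   dist = norm(170.0, 8.0)       # mean/std of the attribute within one class\r\n                #   likelihood = dist.pdf(175.0)  # P(value | clase) under that Gaussian\r\n                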
else:\r\n # We create an array with the i-th attribute values of data where class == clase\r\n\r\n filteredColumn = datosTrain[datosTrain[:,-1]==clase][:, i]\r\n # We extract mean and variance of the i-th column\r\n mean = np.mean(filteredColumn)\r\n std = np.std(filteredColumn)\r\n if math.isnan(std) or std==0:\r\n raise ZeroDivisionError('The standard deviation is 0 or NaN for the data ', filteredColumn)\r\n classesTable[clase] = norm(mean, std)\r\n # If needed, we apply Laplace correction to the table: classesTable[clase][value] needs to be incremented for all clase, value\r\n # Ineficiente a tope pero al menos no es larguisimo, nValuesxNclasses no deberia ser un valor muy grande\r\n if laplaceNeedsToBeApplied:\r\n for clase in classes:\r\n for value in attrValues:\r\n classesTable[clase][value] += 1\r\n\r\n attrTables.append(classesTable)\r\n self.NBTables = attrTables\r\n return\r\n\r\n\r\n def clasifica(self,datosTest,atributosDiscretos,diccionario):\r\n pred = []\r\n classes = diccionario[-1].values()\r\n for data in datosTest:\r\n maxClass = ['Initial maximum class', 0]\r\n for clase in classes:\r\n # Initialize the posteriori numerator as the priori probability for clase\r\n try:\r\n verodotpriori = self.prioris[clase]\r\n except:\r\n print('Clases:', classes)\r\n print('Prioris:', self.prioris)\r\n # Now we multiply by each P(attrN == valueofattrNinourdataelement | clase)\r\n nAtributos = len(atributosDiscretos)-1\r\n for i in range(nAtributos):\r\n # Value of the i-th attribute in the given datosTest element\r\n value = data[i]\r\n # We search in NBTables:\r\n # take the i-th position of the array, corresponding to the dictionary of the i-th attribute\r\n if atributosDiscretos[i]:\r\n # inside, if the attribute is discrete, the dictionary corresponding to the 'clase' key\r\n # and from there, the key 'value' (which is number of occurrences of class = clase and ithattribute = value)\r\n nOccurrences = self.NBTables[i][clase][value]\r\n # And divide by the number of occurrences of the other values given class = clase\r\n vero = nOccurrences/sum(self.NBTables[i][clase].values())\r\n else :\r\n # if the attribute is continuous, take the distribution stored in the 'clase' key\r\n # and calculate the pdf of the ith attribute being the value that our datosTest element has\r\n vero = self.NBTables[i][clase].pdf(value)\r\n verodotpriori *= vero\r\n\r\n # If the last calculated numerator is greater than the previous max, update the class and its numerator\r\n if verodotpriori > maxClass[1]:\r\n maxClass = [clase, verodotpriori]\r\n # We append to the pred array the class predicted for the datosTest element we are testing\r\n pred.append(maxClass[0])\r\n return pred\r\n\r\n def clasificaROC(self, datosTest, atributosDiscretos, diccionario, alpha):\r\n # A set with al classes\r\n classes = diccionario[-1].values()\r\n pr = dict()\r\n # For each data\r\n i = 0\r\n for dato in datosTest:\r\n pr[i] = dict()\r\n # And for each class\r\n for clase in classes:\r\n vero = 1\r\n j = 0\r\n # We calculate the product or all veros\r\n # of all attribute values in data, given the class\r\n for value in dato[:-1]:\r\n if atributosDiscretos[j]: #Nominal\r\n nOccurrences = self.NBTables[j][clase][value]\r\n vero *= nOccurrences/sum(self.NBTables[j][clase].values())\r\n else:#Discreto\r\n vero *= self.NBTables[j][clase].pdf(value)\r\n j+=1\r\n pr[i][clase] = vero\r\n i+=1\r\n # Positive class = 1: we get the probability for all data\r\n # given the positive class, and normalize the vector\r\n positiveProbs 
= np.array([pr[i][1] for i in range(len(datosTest))])\r\n positiveProbs /= np.linalg.norm(positiveProbs)\r\n pred = [1 if prob > alpha else 0 for prob in positiveProbs]\r\n return pred\r\n##############################################################################\r\n\r\nclass ClasificadorVecinosProximos(Clasificador):\r\n def __init__(self, k = 1, weight = False, max_weight = 100):\r\n self.k = k\r\n self.weight = weight\r\n self.max_weight = max_weight\r\n\r\n # KNN no requiere entrenamiento realmente.\r\n def entrenamiento(self,datostrain,atributosDiscretos,diccionario):\r\n self.trainData = datostrain\r\n\r\n # Funcion que clasifica un vector\r\n # v: vector a clasificar (fila de la matriz)\r\n # datos: matriz sobre la que clasificar\r\n # atributosDiscretos: array bool con la indicatriz de los atributos nominales\r\n # k: numero de vecinos proximos\r\n # descripcion: 1 - calcular distancia del vector v con respecto a cada\r\n # return: clase predicha\r\n # fila de la matriz datos\r\n # 2 - ordenar por distancias (menor a mayor)\r\n # 3 - obtener las k distancias menores\r\n # 4 - obtener las clases que aparecen\r\n # 5 - sin pesos: predecir la clase que más aparece\r\n # con pesos: para cada distancia obtener el inverso\r\n # para cada clase sumar esos inversos\r\n # predecir la mayor\r\n # (sirve para corregir en caso de que haya\r\n # mas vecinos proximos a grandes distancias)\r\n\r\n def clasificaFila(self, v, datos, atributosDiscretos, k, weight=False):\r\n dists = [(row[-1], distancia(v, row[:-1], atributosDiscretos[:-1])) for row in datos]\r\n dists = sorted(dists, key=lambda elem: elem[1])\r\n dists = dists[:k]\r\n classes = [x[0] for x in dists]\r\n if weight == False:\r\n clase = max(set(classes), key=classes.count)\r\n else:\r\n inv_dists = [(x[0], 1/x[1]) if x[1] != 0 else (x[0], self.max_weight*k) for x in dists]\r\n weighted_classes = []\r\n for x in set(classes):\r\n weights = [d[1] if x == d[0] else 0 for d in inv_dists]\r\n weighted_classes.append((x, sum(weights)))\r\n clase = max(weighted_classes, key=lambda x: x[1])[0]\r\n return clase\r\n\r\n\r\n def clasifica(self,datostest,atributosDiscretos,diccionario):\r\n datos_n = self.normalizarDatos(datostest, atributosDiscretos)\r\n pred = [self.clasificaFila(row[:-1], self.trainData, atributosDiscretos, self.k, self.weight) for row in datostest]\r\n return pred\r\n\r\n\r\n # Funcion que calcula las medias y las desviaciones tipicas de los\r\n # atributos Continuos de la matriz datos\r\n # datos: matriz de datos continuos y discretos\r\n # atributosDiscretos: array bool con la indicatriz de los atributos nominales\r\n def calcularMediasDesv(self,datos,atributosDiscretos):\r\n return [True if atr == True else (np.mean(col), np.std(col)) for col, atr in zip(datos.T,atributosDiscretos)]\r\n\r\n # Funcion que normaliza los atributos continuos de la matriz datos\r\n # datos: matriz de datos continuos y discretos\r\n # atributosDiscretos: array bool con la indicatriz de los atributos nominales\r\n def normalizarDatos(self,datos,atributosDiscretos):\r\n avg = 0\r\n std = 1\r\n med_desv = self.calcularMediasDesv(datos,atributosDiscretos)\r\n datos_normalizados = np.array([x if atr == True else (x - atr[avg])/atr[std] for x,atr in zip(datos.T, med_desv)])\r\n return datos_normalizados.T\r\n\r\n\r\n\r\n\r\n##############################################################################\r\n##############################################################################\r\n\r\nclass ClasificadorRegresionLogistica(Clasificador):\r\n def 
__init__(self, cteApr=1, nEpocas=15):\r\n self.cteApr = cteApr\r\n self.nEpocas = nEpocas\r\n\r\n def entrenamiento(self, datosTrain, atributosDiscretos, diccionario):\r\n self.w = np.random.rand(len(atributosDiscretos)) - 0.5\r\n for epoca in range(self.nEpocas):\r\n for dato in datosTrain:\r\n x = np.append([1], dato[:-1])\r\n clase = 1 if dato[-1] == 1 else 0\r\n # w = w - nu*(sigm(wx)-clase)x\r\n # If dot product is too low, 1/ e^inf could incur in numerical values\r\n # So we replace 1/e^inf with 0\r\n sigarg = np.dot(self.w,x)\r\n sig = sigmoidal(sigarg) if sigarg > -600 else 0\r\n coefficient = self.cteApr * (sig - clase)\r\n self.w -= coefficient*x\r\n\r\n def clasifica(self, datosTest, atributosDiscretos, diccionario):\r\n pred = []\r\n for data in datosTest:\r\n x = np.append([1], data[:-1])\r\n sigarg = np.dot(self.w,x)\r\n vero = sigmoidal(sigarg) if sigarg > -600 else 0\r\n pred.append( 1 if vero > 0.5 else 0)\r\n return pred\r\n\r\n\r\n\r\n##############################################################################\r\n\r\n\r\n##############################################################################\r\n\r\n# Funciones auxiliares\r\n\r\n# Funcion para calcular los prioris del conjunto de datos de train\r\n# Recibe una matriz np con datos como filas. Ultima columna corresponde a la clase\r\n# Devuelve un diccionario que tiene las clases como keys, y el priori de la clase como valor\r\n\r\ndef prioris(datosTrain):\r\n prioris = dict()\r\n nDatos = len(datosTrain)\r\n classIdx = np.size(datosTrain,1)-1\r\n for dato in datosTrain:\r\n clase = dato[classIdx]\r\n if clase not in prioris.keys():\r\n prioris[clase] = 1/nDatos\r\n else :\r\n prioris[clase] += 1/nDatos\r\n return prioris\r\n\r\n# Funcion para calcular la sigmoidal\r\ndef sigmoidal(t):\r\n return 1 / (1 + math.exp(-t))\r\n\r\n# Funcion para calcular distancia entre dos vectores\r\n# v1,v2: vectores de igual longitud\r\n# atributes: vector de atributos\r\n# si el atributo es nominal, se utiliza la distancia de Manhattan\r\ndef distancia(v1, v2, atributes):\r\n dst = []\r\n for x,y,atr in zip(v1,v2,atributes):\r\n if atr == True:\r\n if x == y:\r\n dst.append(0)\r\n else:\r\n dst.append(1)\r\n else:\r\n dst.append((x - y)**2)\r\n return math.sqrt(sum(dst))\r\n","sub_path":"p2/Clasificador.py","file_name":"Clasificador.py","file_ext":"py","file_size_in_byte":19542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"512891142","text":"\"\"\" gomoku game state representation \"\"\"\nimport numpy as np\nfrom feature import diff, defaultdict\n\n\nDIRECTIONS = [(1, 0), (0, 1), (1, 1), (1, -1)]\n\n\nclass State:\n def __init__(self):\n self.player = 1 # 1 for black and -1 for white\n self.board = np.zeros(shape=(15, 15), dtype=np.int8)\n self.history = list()\n self.features = defaultdict()\n self.end = False\n\n def _count(self, x, y, dx, dy):\n if x >= 15 or x < 0 or y >= 15 or y < 0 or self.board[x, y] != self.player:\n return 0\n return 1 + self._count(x + dx, y + dy, dx, dy)\n\n def _win(self, x, y):\n for dx, dy in DIRECTIONS:\n count = self._count(x + dx, y + dy, dx, dy) + \\\n self._count(x - dx, y - dy, -dx, -dy)\n if count == 4 or (count > 4 and self.player == -1):\n return True\n return False\n\n def _long(self, x, y):\n for dx, dy in DIRECTIONS:\n if self._count(x + dx, y + dy, dx, dy) + \\\n self._count(x - dx, y - dy, -dx, -dy) > 4:\n return True\n return False\n\n def _build(self, x, y, dx, dy, result):\n if 0 <= x <= 14 and 0 <= y <= 14 and self.board[x, 
y] == self.player:\n result.append((x, y))\n self._build(x + dx, y + dy, dx, dy, result)\n\n def highlight(self, x, y):\n assert self.end\n for dx, dy in DIRECTIONS:\n if self._count(x + dx, y + dy, dx, dy) + \\\n self._count(x - dx, y - dy, -dx, -dy) >= 4:\n break\n result = [(x, y)]\n self._build(x + dx, y + dy, dx, dy, result)\n self._build(x - dx, y - dy, -dx, -dy, result)\n return result\n\n def move(self, x, y):\n assert self.board[x, y] == 0 and not self.end\n new = diff(self, x, y)\n self.features.add(new)\n self.board[x, y] = self.player\n self.history.append((x, y))\n if self._win(x, y):\n self.end = True\n else:\n if new[\"-o-oo-\"] + new[\"-ooo-\"] >= 2 or \\\n new[\"four-o\"] + new[\"-oooo-\"] >= 2 or self._long(x, y):\n self.end = True\n self.player = -self.player\n return self.end\n\n def rewind(self):\n x, y = self.history.pop()\n self.board[x, y] = 0\n self.features.sub(diff(self, x, y))\n if self.end:\n self.end = False\n else:\n self.player = -self.player\n\n def __str__(self):\n return str(self.board).replace(\"-1\", \"x\").replace(\" 1\", \"o\").replace(\" 0\", \"+\")\n","sub_path":"python/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"75236527","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scipy import optimize\n\n################################################################################\ndef hist_err(values,bins=100,range=None,fmt='o',color='blue',ecolor='black'):\n\n nentries_per_bin, bin_edges, patches = plt.hist(values,bins=bins,\n range=range,alpha=0.0) # Make histogram transparent.\n\n # Create an errorbar plot using the info from the histogram.\n bin_width = bin_edges[1] - bin_edges[0] # Assumes evenly spaced bins.\n xpts = bin_edges[0:-1] + bin_width/2.0 # Get the bin centers and leave off\n # the last point which is the high\n # side of a bin.\n\n ypts = nentries_per_bin\n xpts_err = bin_width/2.0\n ypts_err = np.sqrt(nentries_per_bin) # Use np.sqrt to take square root\n # of an array. 
We'll assume Gaussian\n                                         # errors here.\n\n    ret = plt.errorbar(xpts, ypts, xerr=xpts_err, yerr=ypts_err,fmt=fmt,\n            color=color,ecolor=ecolor)\n\n    return ret,xpts,ypts,xpts_err,ypts_err\n\n\n################################################################################\ndef fit(func,xdata,ydata,starting_vals=None,yerr=None):\n\n    npars = len(starting_vals)\n\n    fit_params, cov_mat = optimize.curve_fit(func, xdata, ydata, starting_vals, sigma=yerr)\n\n    fit_params_errs = []\n    for i in range(npars):\n        fit_params_errs.append(np.sqrt(cov_mat[i][i])) # diagonal entries of the covariance matrix are the parameter variances\n\n    return fit_params,fit_params_errs,cov_mat\n\n\n\n\n","sub_path":"lichen/lichen.py","file_name":"lichen.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"298299701","text":"CARD_NUMBER = '23456789TJQKA'\nHIGHER_J = set(['J', 'Q', 'K', 'A'])\n\n# card_list length should be 5 or 6\n\n\ndef get_expected_value(card_list=[]):\n    prob_method = [(straight_flush, 128),\n                   (four_of_a_kind, 128),\n                   (full_house, 64),\n                   (flush, 32),\n                   (straight_t, 32),\n                   (straight, 16),\n                   (three_of_a_kind, 16),\n                   (two_pairs_j, 16),\n                   (two_pairs, 8),\n                   (one_pair_j, 4),\n                   (one_pair, 2)]\n    expectedValue = 0.0\n    for card_type, value in prob_method:\n        expectedValue += card_type(card_list) * value\n    return expectedValue\n\n\ndef straight_flush(card_list=[]):\n    s_string = 'A23456789TJQKA'\n    suit_dict = {}\n    for card in card_list:\n        if card[1] not in suit_dict:\n            suit_dict[card[1]] = set([card[0]])\n        else:\n            suit_dict[card[1]].add(card[0])\n    if len(suit_dict) == 4:\n        return 0.0\n    cards_to_deal = 7 - len(card_list)\n    prob = 0.0\n    for _, card_number_list in suit_dict.items():\n        miss_one = False\n        if len(card_number_list) + cards_to_deal < 5:\n            continue\n        for i in range(len(s_string) - 4):\n            straight_set = set(list(s_string[i:i + 5]))\n            miss_card = len(straight_set - card_number_list)\n            if miss_card == 0:\n                return 1.0\n            if miss_card <= cards_to_deal:\n                if cards_to_deal == 1:\n                    # prob += 1 / 46.0\n                    if miss_one is False:\n                        prob += 0.021739130434782608\n                else:\n                    if miss_card == 2:\n                        # get the two cards needed to complete the straight flush\n                        # prob = (1 / 47) * (1 / 46) * 2\n                        if miss_one is False:\n                            prob += 0.0009250693802035153\n                    else:\n                        # miss_card == 1\n                        # get one card to become straight and any other one card\n                        # enter this block, all prob are considered\n                        # prob += (1 / 47) + (46 / 47) * (1 / 46)\n                        prob = 0.0425531914893617\n                        miss_one = True\n    return prob\n\n\ndef four_of_a_kind(card_list=[]):\n    card_number_list = set([c[0] for c in card_list])\n    if len(card_number_list) > 4:\n        return 0.0\n    number_dict = {}\n    for n in card_number_list:\n        number_dict[n] = 0\n    cards_to_deal = 7 - len(card_list)\n\n    for c in card_list:\n        number_dict[c[0]] += 1\n        if number_dict[c[0]] == 4:\n            return 1.0\n\n    prob = 0.0\n    if cards_to_deal == 1:\n        for _, n in number_dict.items():\n            if n == 3:\n                prob += 0.021739130434782608\n    else:\n        # cards_to_deal == 2:\n        if len(card_number_list) == 4:\n            # card type should be 2 1 1 1, need to get two same cards with the pair\n            # prob = 2 / 47 * 1 / 46\n            prob = 0.0009250693802035153\n        elif len(card_number_list) == 2:\n            # card type should be 3 2\n            # need to get two same cards with the pair or one of the 3 same kind and any other one\n            # prob = 2 / 47 * 1 / 46\n            # prob += 1 / 47 * 2\n            prob = 0.043478260869565216\n        else:\n            # card type could be 2 2 1 or 3 1 1\n            for _, n in number_dict.items():\n                if n == 2:\n                    # card type should be 2 2 1\n                    # need to get 2 same card from the pairs\n                    # prob = 4 / 
47 * 1 / 46\n prob = 0.0018509949097639982\n break\n if n == 3:\n # card type should be 3 1 1\n # need to get one of the 3 same kind and any other one\n # prob = 1 / 47 * 2\n prob = 0.0425531914893617\n break\n return prob\n\n\ndef full_house(card_list=[]):\n card_number_list = set([c[0] for c in card_list])\n if len(card_number_list) > 4:\n return 0.0\n number_dict = {}\n for n in card_number_list:\n number_dict[n] = 0\n cards_to_deal = 7 - len(card_list)\n count_dict = {1: 0, 2: 0, 3: 0, 4: 0}\n\n for c in card_list:\n number_dict[c[0]] += 1\n\n for _, n in number_dict.items():\n count_dict[n] += 1\n if count_dict[3] > 0:\n if count_dict[2] > 0 or count_dict[3] > 1:\n return 1.0\n if count_dict[4] == 1 and count_dict[2] == 1:\n return 1.0\n\n prob = 0.0\n if cards_to_deal == 1:\n if count_dict[4] == 1:\n # card type should be 4 1 1\n # need get one same with the single cards\n # prob = 6 / 46\n prob = 0.13043478260869565\n else:\n if count_dict[3] > 0:\n # card type should be 3 1 1 1\n # need get one more card same with the single cards\n # prob = 9 / 46\n prob = 0.1956521739130435\n elif count_dict[2] > 1:\n # card type could be 2 2 1 1 or 2 2 2\n # need get one card same with the pairs\n prob = 2 * count_dict[2] / 46.0\n else:\n # cards_to_deal == 2\n if count_dict[4] == 1:\n # card type should be 4 1\n # need get one same card with single and any other card or\n # get two same other cards\n # prob = 3 / 47 + 44 / 47 * 3 / 46\n # prob += 44 / 47 * 3 / 46\n prob = 0.18593894542090658\n else:\n if count_dict[3] > 0:\n # card type should be 3 1 1\n # need get one from the single and any other card or\n # get two same other cards\n # prob = 6 / 47 + 41 / 47 * 6 / 46\n # prob = 41 / 47 * 3 / 46\n prob = 0.29833487511563367\n elif count_dict[2] > 1:\n # card type should be 2 2 1\n # need get one same with the pairs and any one other or\n # get two same cards with the single\n # prob = 4 / 47 + 43 / 47 * 4 / 46\n # prob += 3 / 47 * 2 / 46\n prob = 0.16743755781683625\n else:\n # count_dict[2] == 1:\n # card type should be 2 1 1 1\n # need get one same with the pair and one from the single or\n # two same cards with the single\n # prob = 2 / 47 * 9 / 46 * 2\n # prob += 9 / 47 * 2 / 46\n prob = 0.02497687326549491\n return prob\n\n\ndef flush(card_list=[]):\n suit_list = set([c[1] for c in card_list])\n if len(suit_list) == 4:\n return 0.0\n suit_dict = {}\n for s in suit_list:\n suit_dict[s] = 0\n cards_to_deal = 7 - len(card_list)\n\n for c in card_list:\n suit_dict[c[1]] += 1\n if suit_dict[c[1]] > 4:\n return 1.0\n\n prob = 0.0\n if cards_to_deal == 1:\n # need to has 4 cards in a suit\n for _, n in suit_dict.items():\n if n == 4:\n # prob = 9 / 46\n prob = 0.1956521739130435\n break\n else:\n # cards_to_deal == 2\n # need the type with 3 1 1, 3 2, or 4 1\n # if has 3 of a suit, need get two more the same suit\n # if has 4 of a suit, need get one more the same suit and any other one\n # or equal 1 - porb to get two other suits\n for _, n in suit_dict.items():\n if n == 3:\n # prob = 10 / 47 * 9 / 46\n prob = 0.041628122109158186\n break\n elif n == 4:\n # prob = 1 - 38 / 47 * 37 / 46\n prob = 0.3496762257169288\n break\n return prob\n\n\ndef straight_t(card_list=[]):\n straight_set = set(['T', 'J', 'Q', 'K', 'A'])\n card_number_list = set([c[0] for c in card_list])\n cards_to_deal = 7 - len(card_list)\n miss_card = len(straight_set - card_number_list)\n if miss_card == 0:\n return 1.0\n if miss_card <= cards_to_deal:\n if cards_to_deal == 1:\n # prob += 4 / 46.0\n prob = 
0.08695652173913043\n else:\n if miss_card == 2:\n # get two card to become straight\n # prob += (4 / 47) * (4 / 46) * 2\n prob = 0.014801110083256245\n else:\n # miss_card == 1\n # get one card to become straight and any other one card\n # prob += (4 / 47) + (43 / 47) * (4 / 46)\n prob = 0.16466234967622573\n else:\n prob = 0.0\n return prob\n\n\ndef straight(card_list=[]):\n s_string = 'A23456789TJQKA'\n card_number_list = set([c[0] for c in card_list])\n cards_to_deal = 7 - len(card_list)\n prob = 0.0\n miss_one = False\n for i in range(len(s_string) - 4):\n straight_set = set(list(s_string[i:i + 5]))\n miss_card = len(straight_set - card_number_list)\n if miss_card == 0:\n return 1.0\n if miss_card <= cards_to_deal:\n if cards_to_deal == 1:\n # prob += 4 / 46.0\n if miss_one is False:\n prob += 0.08695652173913043\n else:\n if miss_card == 2:\n # get two card to become straight\n # prob += (4 / 47) * (4 / 46) * 2\n if miss_one is False:\n prob += 0.014801110083256245\n else:\n # miss_card == 1\n # get one card to become straight and any other one card\n # enter this block, all prob are considered\n # prob += (1 / 47) + (46 / 47) * (1 / 46)\n prob = 0.16466234967622573\n miss_one = True\n return prob\n\n\ndef three_of_a_kind(card_list=[]):\n card_number_list = set([c[0] for c in card_list])\n if len(card_number_list) == 6:\n return 0.0\n number_dict = {}\n for n in card_number_list:\n number_dict[n] = 0\n cards_to_deal = 7 - len(card_list)\n\n for c in card_list:\n number_dict[c[0]] += 1\n if number_dict[c[0]] == 3:\n return 1.0\n\n prob = 0.0\n if cards_to_deal == 1:\n # only need to consider with 2 types, 2 1 1 1 1 and 2 2 1 1\n if len(card_number_list) == 4:\n # get one same with the pairs\n # prob = 4 / 47\n prob = 0.0851063829787234\n else:\n # len(card_number_list) == 5\n # get one same with the pair\n # prob = 2 / 47\n prob = 0.0425531914893617\n else:\n # cards_to_deal == 2\n if len(card_number_list) == 5:\n # card type should be 1 1 1 1 1\n # need get two same card with any card in card_list\n # prob = 3 / 47 * 2 / 46 * 5\n prob = 0.013876040703052728\n elif len(card_number_list) == 4:\n # card type should be 2 1 1 1\n # get two same card with single card in card_list or one card same with pair and any other one\n # prob = 3 * (3 / 47 * 2 / 46)\n # prob += (2 / 47 + 45 / 47 * 2 / 46)\n prob = 0.09250693802035152\n else:\n # len(card_number_list) == 3\n # card type should be 2 2 1\n # get two cards same with the single card or\n # any one card same with the pairs\n # prob = 2 / 47 * 1 / 46\n # prob += 4 / 47 + 43 / 47 * 4 / 46\n prob = 0.16558741905642924\n return prob\n\n\ndef two_pairs_j(card_list=[]):\n card_number_list = set([c[0] for c in card_list])\n number_dict = {}\n for n in card_number_list:\n number_dict[n] = 0\n pair_count = 0\n cards_to_deal = 7 - len(card_list)\n big_pair = 0\n bigger_set = set()\n\n for c in card_list:\n number_dict[c[0]] += 1\n if number_dict[c[0]] == 2:\n pair_count += 1\n if c[0] in HIGHER_J:\n big_pair += 1\n\n if big_pair > 0 and pair_count > 2:\n # already meet the goal\n return 1.0\n\n bigger_set = HIGHER_J.intersection(card_number_list)\n prob = 0.0\n # print('number_dict:{}'.format(number_dict))\n # print('target_card:{}'.format(target_card))\n # print('pair_count:{}'.format(pair_count))\n \"\"\"\n if pair_count == 3:\n # no chance\n prob = 0.0\n \"\"\"\n if pair_count == 2:\n if cards_to_deal == 1:\n # card type should be 2 2 1 1\n # get one bigger care to become a pair\n prob = len(bigger_set) * 3 / 46.0\n else:\n # card type should be 
2 2 1\n if len(bigger_set) == 0:\n # get two same bigger cards\n # prob = (4 * 4 / 47) * (3 / 46)\n prob = 0.022201665124884366\n else:\n # len(bigger_set) == 1\n # get one card same with the bigger card and any one other card or\n # get two same bigger cards\n # prob = (3 / 47) + (44 / 47) * (3 / 46)\n # prob += (3 * 4 / 47) * (3 / 46)\n prob = 0.14153561517113783\n elif pair_count == 1:\n if cards_to_deal == 1:\n # card type should be 2 1 1 1 1\n if len(bigger_set) == 0:\n prob = 0.0\n else:\n if big_pair == 1:\n # get one more pair same with single\n # prob = 4 * 3 / 46\n prob = 0.2608695652173913\n else:\n # get one more pair same with bigger single\n prob = 3 * len(bigger_set) / 46.0\n else:\n # cards_to_deal == 2\n # card type should be 2 1 1 1\n if len(bigger_set) == 0:\n # get two same bigger cards\n # prob = (4 * 4 / 47) * (3 / 46)\n prob = 0.022201665124884366\n else:\n if big_pair == 1:\n # get one single and any other or\n # get two same other cards\n # prob = (3 * 3 / 47) + ((38 / 47) * (3 * 3 / 46))\n # prob += (4 * 9 / 47) * (3 / 46)\n prob = 0.3996299722479186\n else:\n # get one card same with bigger single and any other one or\n # get two same bigger cards not same with bigger single\n # prob = (3 * len(bigger_set) / 47) + ((47 - 3 * len(bigger_set)) / 47) * (3 * len(bigger_set) / 46)\n # prob += ((4 * (4 - len(bigger_set))) / 47 ) * (3 / 46)\n prob = ((267 - 9 * len(bigger_set)) * len(bigger_set) + 48) / 2162.0\n else:\n # pair_count == 0\n if cards_to_deal == 1:\n return 0.0\n else:\n # card type should be 1 1 1 1 1\n # get one bigger single and any other single\n # porb = (3 * len(bigger_set) / 47) * (3 * 4 / 46)\n prob = (36 * len(bigger_set)) / 2162.0\n return prob\n\n\ndef two_pairs(card_list=[]):\n card_number_list = set([c[0] for c in card_list])\n number_dict = {}\n for n in card_number_list:\n number_dict[n] = 0\n pair_count = 0\n cards_to_deal = 7 - len(card_list)\n\n for c in card_list:\n number_dict[c[0]] += 1\n if number_dict[c[0]] == 2:\n pair_count += 1\n\n if pair_count > 1:\n return 1.0\n\n if pair_count == 1:\n # need one more cards\n if cards_to_deal == 1:\n # card type should be 2 1 1 1 1\n # get one from the card already had one(4 kinds card)\n # prob = 3 * 4 / 46.0\n prob = 0.2608695652173913\n else:\n # card type should be 2 1 1 1\n # 1 - prob get two cards without any pair - prob get one same with pair and one other - prob two same with the pair\n # prob = 1 - (4 * 9 / 47) * (4 * 8 / 46)\n # prob -= ((2 / 47) * (9 * 4 / 46) * 2)\n # prob -= (2 / 47) * (1 / 46)\n # prob = 0.39962997224791863\n # get one from single and any other or\n # get two same cards from other\n # prob = (3 * 3 / 47) + (38 / 47) * (3 * 3 / 46)\n # prob += (9 * 4 / 47) * 3 / 46\n prob = 0.3996299722479186\n else:\n # pair_count == 0\n if cards_to_deal == 2:\n # get two different cards from single\n # prob = (3 * 5 / 47) * (3 * 4 / 46) * 2\n prob = 0.16651248843663274\n else:\n prob = 0.0\n return prob\n\n\ndef one_pair_j(card_list=[]):\n card_number_list = set([c[0] for c in card_list])\n number_dict = {}\n bigger_than_J_count = 0\n for n in card_number_list:\n number_dict[n] = 0\n if n in HIGHER_J:\n bigger_than_J_count += 1\n pair_count = 0\n cards_to_deal = 7 - len(card_list)\n target_card = None\n\n for c in card_list:\n number_dict[c[0]] += 1\n if number_dict[c[0]] == 2:\n pair_count += 1\n if c[0] in HIGHER_J:\n target_card = c[0]\n if target_card is not None:\n # already meet the goal\n return 1.0\n\n prob = 0.0\n if cards_to_deal == 1:\n if bigger_than_J_count > 0:\n # 
get an other bigger than J\n prob = 3 * bigger_than_J_count / 46.0\n else:\n if bigger_than_J_count > 0:\n # get one same with the bigger and anyother or\n # get two same cards of other bigger cards\n # prob = 3 * bigger_than_J_count / 47 + ((47 - 3 * bigger_than_J_count) / 47) * (3 * bigger_than_J_count / 46)\n # prob += (4 * (4 - bigger_than_J_count) / 47) * (3 / 46)\n prob = ((267 - 9 * bigger_than_J_count) * bigger_than_J_count + 48) / 2162.0\n else:\n # get two same big cards\n # prob = (4 * 4 / 47) * (3 / 46)\n prob = 0.022201665124884366\n return prob\n\n\ndef one_pair(card_list=[]):\n card_number_list = set([c[0] for c in card_list])\n if len(card_number_list) < len(card_list):\n return 1.0\n cards_to_deal = 7 - len(card_list)\n\n if cards_to_deal == 1:\n # card type should be 1 1 1 1 1 1\n # get one same with single\n # prob = 3 * 6 / 46\n prob = 0.391304347826087\n else:\n # card type should be 1 1 1 1 1\n # cards_to_deal == 2\n # 1 - prob get one other card and an other card (1 - no pair)\n # prob = 1 - ((4 * (13 - 5) / 47) * (4 * (12 - 5) / 46))\n # prob = 0.5855689176688252\n # get one from single and any other one or\n # two same cards not same with single\n # prob = (3 * 5 / 47) + (8 * 4 / 47) * (3 * 5 / 46)\n # prob += (8 * 4 / 47) * (3 / 46)\n prob = 0.5855689176688251\n return prob\n\n\nif __name__ == '__main__':\n import sys\n card = []\n for i in range(len(sys.argv) - 1):\n card.append(sys.argv[i + 1])\n print(get_expected_value(card))\n","sub_path":"expected.py","file_name":"expected.py","file_ext":"py","file_size_in_byte":19044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"353084647","text":"import sys\nimport argparse\nimport tensorflow as tf\nfrom keras.models import Model, load_model\nfrom keras.layers import TimeDistributed, Conv1D, Dense, Embedding, Input, Dropout, LSTM, Bidirectional, MaxPooling1D, \\\n Flatten, concatenate\nfrom keras.initializers import RandomUniform\nfrom keras.callbacks import EarlyStopping\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.utils import plot_model\nfrom keras.models import load_model\nfrom hyperopt import Trials, STATUS_OK, tpe\nfrom hyperas import optim\nfrom hyperas.distributions import choice, uniform, randint\nfrom util.util import *\n\n\ndef data():\n class_ = sys.argv[1]\n property_name = sys.argv[2]\n\n train_df = get_train_data2(class_, property_name)\n print(train_df.shape)\n\n train_data, case2Idx, caseEmbeddings, word2Idx, wordEmbeddings, \\\n char2Idx, label2Idx, sentences_maxlen, words_maxlen = prepare_data(train_df)\n\n val_df = get_val_data2(property_name)\n print(val_df.shape)\n\n val_data = embed_sentences(add_char_information_in(tag_data(val_df)), class_, property_name)\n\n X_train, Y_train = split_data(train_data)\n X_val, Y_val = split_data(val_data)\n\n return X_train, Y_train, X_val, Y_val, caseEmbeddings, wordEmbeddings, label2Idx, char2Idx, words_maxlen\n\n\ndef model(X_train, Y_train, X_val, Y_val, wordEmbeddings, label2Idx, lstm_state_size):\n # word-level input\n words_input = Input(shape=(None,), dtype='int32', name='Words_input')\n words = Embedding(input_dim=wordEmbeddings.shape[0], output_dim=wordEmbeddings.shape[1],\n weights=[wordEmbeddings], trainable=False)(words_input)\n\n output = Bidirectional(LSTM(lstm_state_size,\n return_sequences=True,\n dropout=0.6, # on input to each LSTM block\n recurrent_dropout=0.25 # on recurrent input signal\n ), name=\"BLSTM\")(words)\n\n output = TimeDistributed(Dense(len(label2Idx), 
activation='softmax'), name=\"Softmax_layer\")(output)\n\n # set up model\n model = Model(inputs=[words_input], outputs=[output])\n\n model.compile(loss='sparse_categorical_crossentropy',\n optimizer='nadam', metrics=['accuracy'])\n\n model.summary()\n\n # fit model\n train_batch, train_batch_len = createBatches2BLSTM(X_train, Y_train)\n val_batch, val_batch_len = createBatches2BLSTM(X_val, Y_val)\n\n model.fit_generator(iterate_minibatches_BLSTM(train_batch, train_batch_len),\n steps_per_epoch=len(train_batch),\n # class_weight=class_weight_vect,\n epochs=10, verbose=2, validation_steps=len(val_batch),\n validation_data=iterate_minibatches_BLSTM(val_batch, val_batch_len))\n\n # score, acc = model.evaluate(X_val, Y_val, verbose=0)\n score, acc = model.evaluate_generator(generator=iterate_minibatches_BLSTM(val_batch, val_batch_len), steps=len(val_batch),\n verbose=0)\n print('Test accuracy:', acc)\n\n return {'loss': -acc, 'status': STATUS_OK, 'model': model}\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 4:\n class_ = sys.argv[1]\n property_name = sys.argv[2]\n lstm_state_size = int(sys.argv[3])\n\n X_train, Y_train, X_val, Y_val, caseEmbeddings, wordEmbeddings, label2Idx, char2Idx, words_maxlen = data()\n\n result = model(X_train, Y_train, X_val, Y_val, wordEmbeddings, label2Idx, lstm_state_size)\n\n best_model = result['model']\n\n best_model.save('models/' + class_ + '/dl/' + property_name + '-BLSTM_best_model.h5')\n\n model = load_model('models/' + class_ + '/dl/' + property_name + '-BLSTM_best_model.h5')\n\n test_df = get_test_data2(property_name)\n print(test_df.shape)\n\n test_data = embed_sentences(add_char_information_in(tag_data(test_df)), class_, property_name)\n\n X_test, Y_test = split_data(test_data)\n\n test_batch, test_batch_len = createBatches2BLSTM(X_test, Y_test)\n\n print(\"Evalutation of best performing model:\")\n score, acc = model.evaluate_generator(generator=iterate_minibatches_BLSTM(test_batch, test_batch_len),\n steps=len(test_batch), verbose=0)\n print(\"acc on test: \", acc)\n else:\n print(\"INFORM PARAMETERS\")","sub_path":"BLSTM_train.py","file_name":"BLSTM_train.py","file_ext":"py","file_size_in_byte":4365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"551579661","text":"import unittest\nfrom timetable1 import Timetable1\nfrom term import Term\nfrom day import Day\nfrom lesson import Lesson\nfrom action import Action\n\nlesson1 = Lesson(Timetable1, Term(12, 15, day = Day.THU), \"Programowanie\", \"Polak\", 2)\nlesson2 = Lesson(Timetable1, Term(8, 20, day = Day.THU), \"SysOps\", \"Rzecki\", 2)\nlesson3 = Lesson(Timetable1, Term(17, 20, day = Day.FRI), \"Kryptografia\", \"Topa\", 2)\n\ntable = Timetable1()\ntable.lessons = [lesson1, lesson2, lesson3]\n\n\nclass TestTimetable1(unittest.TestCase):\n\n\n def test_can_be(self):\n self.assertEqual(table.can_be_transferred_to(Term(15, 45, day = Day.THU), True), True)\n self.assertEqual(table.can_be_transferred_to(Term(19, 10, day = Day.FRI), True), False)\n\n\n def test_busy(self):\n self.assertEqual(table.busy(Term(15, 45, day = Day.THU)), False)\n self.assertEqual(table.busy(Term(18, 15, day = Day.FRI)), True)\n\n\n def test_put(self):\n to_add1 = Lesson(Timetable1, Term(15, 45, day = Day.THU), \"Rosyjski\", \"Jawor\", 2)\n to_add2 = Lesson(Timetable1, Term(18, 10, day = Day.FRI), \"Rosyjski\", \"Jawor\", 2)\n\n self.assertEqual(table.put(to_add1), True)\n self.assertEqual(table.put(to_add2), False)\n \n\n def test_parse(self):\n 
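# parse() maps the shorthand tokens to Action values (d- -> DAY_EARLIER,\n        # d+ -> DAY_LATER, t- -> TIME_EARLIER, t+ -> TIME_LATER) and drops\n        # unrecognised tokens such as aaaaa and eeeeeeee below.\n        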
self.assertEqual(table.parse([\"d-\", \"aaaaa\", \"d+\", \"eeeeeeee\", \"t-\", \"t+\"]),[Action.DAY_EARLIER, Action.DAY_LATER, Action.TIME_EARLIER, Action.TIME_LATER])\n\n\n def test_perform(self):\n first = Lesson(Timetable1, Term(13, 45, day = Day.THU), \"Rosyjski\", \"Jawor\", 2)\n second = Lesson(Timetable1, Term(18, 10, day = Day.WED), \"Rosyjski\", \"Jawor\", 2)\n new = Timetable1()\n new.lessons = [first, second]\n\n actions = table.parse([\"d+\", \"d-\", \"t+\", \"t-\"])\n new.perform(actions)\n\n self.assertEqual(first.term.day.value, 5)\n self.assertEqual(first.term.hour, 15)\n self.assertEqual(first.term.minute, 15)\n self.assertEqual(second.term.day.value, 2)\n self.assertEqual(second.term.hour, 16)\n self.assertEqual(second.term.minute, 40)\n\n\n actions2 = table.parse([\"t+\",\"d-\",\"t-\",\"d-\"])\n new.perform(actions2)\n\n self.assertEqual(second.term.day.value, 1)\n self.assertEqual(first.term.hour, 15)\n self.assertEqual(first.term.minute, 15)\n\n def test_get(self):\n self.assertEqual(table.get(Term(8, 20, day = Day.THU)), lesson2)\n self.assertEqual(table.get(Term(14, 15, day = Day.THU)), None)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"lab4/DeanerySystem/test_timetable1.py","file_name":"test_timetable1.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"572163243","text":"import networkx as nx\nimport bmt\nfrom typing import Union, List, Dict\nfrom .prefix_manager import PrefixManager\nimport logging\nimport click\nimport re\n\nfrom collections import defaultdict\n\nfrom metamodel.utils.schemaloader import SchemaLoader\n\nclass Validator(object):\n \"\"\"\n Object for validating a property graph\n \"\"\"\n\n def __init__(self, record_size=None):\n self.prefix_manager = PrefixManager()\n self.items = set()\n self.errors = []\n self.schema = SchemaLoader('https://biolink.github.io/biolink-model/biolink-model.yaml').resolve()\n self.record_size = record_size\n self.error_dict = defaultdict(set)\n\n def ok(self):\n return len(self.errors) == 0\n\n def validate(self, G):\n \"\"\"\n Validate a property graph\n\n Test all node and edge properties plus relationship types are declared\n \"\"\"\n self.validate_categories(G)\n self.validate_edge_labels(G)\n self.validate_node_properties(G)\n # for nid,ad in G.nodes(data=True):\n # self.validate_node(nid, ad)\n # for oid,sid,ad in G.edges(data=True):\n # self.validate_edge(G, oid, sid, ad)\n\n def validate_node(self, nid, ad):\n self.validate_id(nid)\n self.validate_props(ad)\n self.validate_node_requirements(ad)\n\n def validate_edge(self, G, oid, sid, ad):\n self.validate_id(oid)\n self.validate_id(sid)\n self.validate_props(ad)\n self.validate_edge_requirements(G, oid, sid, ad)\n\n def validate_id(self, id):\n if \":\" in id:\n uri = self.prefix_manager.expand(id)\n if uri is None or uri == id:\n self.report(id, \"expansion is identical\")\n else:\n if id not in self.prefix_manager.prefixmap:\n self.report(id, \"no such short form\")\n\n def validate_node_requirements(self, ad):\n node_id = ad.get('id')\n\n self.test(lambda: 'id' in ad, node_id, 'node lacks id attribute')\n self.test(lambda: 'name' in ad, node_id, 'node lacks name attribute')\n success = self.test(lambda: 'category' in ad, node_id, 'node lacks category attribute')\n\n if not success:\n return\n\n category = ad['category']\n\n if isinstance(category, str):\n self.test(lambda: category in self.schema.classes, category, 'node category is 
invalid')\n        elif isinstance(category, (list, tuple, set)):\n            for c in category:\n                self.test(lambda: c in self.schema.classes, c, 'node category is invalid')\n        else:\n            self.report(node_id, f'category is invalid type: {type(category)}')\n\n        labels = ad.get('labels')\n\n        if labels is not None:\n            for label in labels:\n                if label not in self.schema.classes:\n                    self.report(label, 'node label is invalid')\n\n    def validate_edge_requirements(self, G, oid, sid, ad):\n        \"\"\"\n        Checks that an edge has an edge_label, that it's valid and within the\n        minimal list, and that the subject and object fall within the edge's\n        domain and range.\n        \"\"\"\n        edge_id = ad.get('id')\n\n        self.test(lambda: 'is_defined_by' in ad, edge_id, 'edge lacks is_defined_by attribute')\n        self.test(lambda: 'provided_by' in ad, edge_id, 'edge lacks provided_by attribute')\n\n        success = self.test(lambda: 'edge_label' in ad, edge_id, 'edge lacks edge_label attribute')\n\n        if not success:\n            return\n\n        edge_label = ad['edge_label']\n\n        if not isinstance(edge_label, str):\n            self.report(edge_label, f'edge label is invalid type: {type(edge_label)}')\n            return\n\n        if edge_label not in self.schema.slots:\n            self.report(edge_label, 'edge label is invalid')\n            return\n\n        slot = self.schema.slots[edge_label]\n\n        fn = lambda: 'in_subset' in slot and 'translator_minimal' in slot['in_subset']\n        self.test(fn, edge_label, 'edge label not in minimal list')\n\n        object_category = G.node[oid]['category']\n        subject_category = G.node[sid]['category']\n\n        if slot.domain is not None:\n            if slot.domain != subject_category:\n                self.report(sid, f'{subject_category} is outside of domain of {edge_label}')\n\n        if slot.range is not None:\n            if slot.range != object_category:\n                self.report(oid, f'{object_category} is outside of range of {edge_label}')\n\n    def validate_props(self, ad):\n        for p,v in ad.items():\n            self.validate_id(p)\n\n    def test(self, fn, item, info=\"\"):\n        if not fn():\n            self.report(item, info)\n            return False\n        else:\n            return True\n\n    def validate_categories(self, G):\n        with click.progressbar(G.nodes(data=True)) as bar:\n            for n, data in bar:\n                categories = data.get('category')\n                if categories is None:\n                    self.log_node_error(n, 'absent category')\n                elif not isinstance(categories, list):\n                    self.log_node_error(n, 'invalid category type', message='category type is {} when it should be {}'.format(type(categories), list))\n                else:\n                    for category in categories:\n                        c = bmt.get_class(category)\n                        if c is None:\n                            self.log_node_error(n, 'invalid category', message='{} not in biolink model'.format(category))\n                        elif category != c.name and category in c.aliases:\n                            self.log_node_error(n, 'alias category', message='should not use alias {} for {}'.format(category, c.name))\n\n    def validate_edge_labels(self, G):\n        with click.progressbar(G.edges(data=True)) as bar:\n            for u, v, data in bar:\n                edge_label = data.get('edge_label')\n                if edge_label is None:\n                    self.log_edge_error(u, v, 'absent edge label')\n                elif not isinstance(edge_label, str):\n                    self.log_edge_error(u, v, 'invalid edge label type', message='edge label type is {} when it should be {}'.format(type(edge_label), str))\n                else:\n                    p = bmt.get_predicate(edge_label)\n                    if p is None:\n                        self.log_edge_error(u, v, 'invalid edge label', message='{} not in biolink model'.format(edge_label))\n                    elif edge_label != p.name and edge_label in p.aliases:\n                        self.log_edge_error(u, v, 'alias edge label', message='should not use alias {} for {}'.format(edge_label, p.name))\n                    elif not re.match(r'^[a-z_]*$', edge_label):\n                        
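# the pattern admits only lowercase letters and underscores, i.e. snake_case\n                        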
self.log_edge_error(u, v, 'invalid edge label', message='\"{}\" is not snake case'.format(edge_label))\n\n def validate_node_properties(self, G):\n named_thing = bmt.get_class('named thing')\n with click.progressbar(G.nodes(data=True)) as bar:\n for n, data in bar:\n for key, value in data.items():\n if key in named_thing.slots:\n if bmt.get_element(key).multivalued and not isinstance(value, list):\n self.log_node_error(n, 'invalid property type', message='{} type should be {} but its {}'.format(key, list, type(value)))\n if not bmt.get_element(key).multivalued and isinstance(value, list):\n self.log_node_error(n, 'invalid property type', message='{} type should be {} but its {}'.format(key, str, type(value)))\n if not re.match(r'^[^ :]+:[^ :]+$', n):\n self.log_node_error(n, 'invalid property value', message='id is not a curie')\n\n def log_edge_error(self, u, v, error_type, *, message=None):\n if self.record_size is None or len(self.error_dict[error_type]) < self.record_size:\n self.error_dict[error_type].add((u, v, message))\n\n def log_node_error(self, n, error_type, *, message=None):\n if self.record_size is None or len(self.error_dict[error_type]) < self.record_size:\n self.error_dict[error_type].add((n, message))\n\n def report(self, item, info=\"\"):\n if item in self.items:\n return\n msg = \"Item: {} Message: {}\".format(item, info)\n logging.error(msg)\n self.errors.append(msg)\n self.items.add(item)\n","sub_path":"kgx/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":8454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"193866446","text":"import sys\nimport pylab\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport warnings\n\nfrom common.optutil import warn_with_traceback\nfrom .plot_helper import save_figure, get_minor_locator\nfrom .settings import set_all, set_style\n\n#warnings.showwarning = warn_with_traceback\n\ndef plot_bar_chart(data,\n labels=None,\n xlabel=None,\n ylabel=None,\n ylim=None,\n xformatter=None,\n yformatter=None,\n ylocator=None,\n xlocator=None,\n legend=False,\n legend_options=None,\n yerrs=None,\n width=0.9,\n margin=0.,\n hatch_patterns=None,\n xticklabels=[],\n yminor=True,\n bottoms=None,\n context_kwargs={},\n style_kwargs={},\n palette_kwargs={},\n figsize=(9., 5.5),\n savefile=None):\n if not isinstance(data, np.ndarray):\n data = np.array(data, dtype=float)\n if data.ndim == 1:\n data = data.reshape(data.shape[0], 1)\n one_dim = True\n else:\n one_dim = False\n ngroups,ninstances = data.shape\n if bottoms is None:\n stacked = False\n bottoms = [0. 
for _ in range(ngroups)]\n    else:\n        stacked = True\n    if labels is None:\n        labels = ['' for _ in range(ninstances)]\n    if hatch_patterns is None:\n        # default to no hatching so the per-group lookup below is safe\n        hatch_patterns = [None for _ in range(ngroups)]\n\n    palette = set_all(sns, plt,\n                      context_kwargs=context_kwargs,\n                      style_kwargs=style_kwargs,\n                      palette_kwargs=palette_kwargs)\n    palette = palette[:ngroups]\n    edgecolors = ['black' for _ in range(ngroups)]\n\n    ax = plt.gca()\n\n    x = np.arange(ninstances)\n    if yerrs is None:\n        error_kw = {}\n        yerrs = [None for _ in range(ngroups)]\n    else:\n        error_kw=dict(lw=2,\n                      capsize=5,\n                      capthick=2,\n                      ecolor='black')\n    height_fn = None\n    if one_dim == True:\n        assert stacked != True\n        dimw = width\n        height_fn = get_height_1d\n        x = x + margin\n        offset = 1.0 - width\n        xtick_locs = x + dimw\n    elif stacked == True:\n        x = x + margin\n        height_fn = get_height_stacked\n        dimw = width\n        xtick_locs = x\n    else:\n        dimw = width / ninstances\n        offset = 0.0\n        height_fn = get_height_2d\n        xtick_locs = x + dimw\n    assert height_fn is not None\n\n    for i in range(ngroups):\n        height = height_fn(x, i, dimw)\n        bg = plt.bar(height,\n                     data[i],\n                     width=dimw,\n                     color=palette[i],\n                     edgecolor=edgecolors[i],\n                     hatch=hatch_patterns[i],\n                     label=labels[i],\n                     align='center',\n                     bottom=bottoms[i],\n                     yerr=yerrs[i],\n                     error_kw=error_kw)\n    #plt.xticks(x + dimw, xticklabels)\n    plt.xticks(xtick_locs, xticklabels)\n\n    if ylim is not None:\n        plt.ylim(ylim)\n    if ylabel is not None:\n        plt.ylabel(ylabel)\n    if yformatter is not None:\n        ax.yaxis.set_major_formatter(yformatter)\n    if xformatter is not None:\n        ax.xaxis.set_major_formatter(xformatter)\n\n    if xlocator is not None:\n        ax.xaxis.set_major_locator(xlocator)\n    if ylocator is not None:\n        ax.yaxis.set_major_locator(ylocator)\n    if yminor == True:\n        minor_loc = get_minor_locator(ax.yaxis.get_major_locator())\n        ax.yaxis.set_minor_locator(minor_loc)\n\n    if legend:\n        if legend_options is not None:\n            ax.legend(**legend_options)\n        else:\n            ax.legend()\n\n    if savefile:\n        save_figure(plt, savefile, figsize)\n    else:\n        plt.show()\n    plt.close()\n\ndef get_height_1d(x, i, dimw):\n    #height = x+i*(dimw+offset)\n    return x + i\n\ndef get_height_2d(x, i, dimw):\n    return x + i * dimw\n\ndef get_height_stacked(x, i, dimw):\n    return x\n","sub_path":"plot/plot_bar_chart.py","file_name":"plot_bar_chart.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"296821027","text":"from db import get_connection, get_from_datamaster\n\nrequirements = []\n\n\ndef build():\n    with get_connection() as con:\n        cur = con.cursor()\n        cur.execute(\"DROP TABLE IF EXISTS Statuses\")\n        cur.execute(\"CREATE TABLE Statuses(\"\n                    \"Id INTEGER PRIMARY KEY AUTOINCREMENT, \"\n                    \"StatusName TEXT, \"\n                    \"StatusType TEXT)\")\n\n        for csv_row in get_from_datamaster('Statuses.csv'):\n            # parameterized query avoids quoting issues and SQL injection\n            cur.execute(\"INSERT INTO Statuses (\"\n                        \"StatusName, StatusType) \"\n                        \"VALUES (?, ?)\",\n                        (csv_row.get('StatusName'),\n                         csv_row.get('StatusType')))\n","sub_path":"db/tables/statuses.py","file_name":"statuses.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"18264585","text":"import requests\n\n\n\nurl = 'https://135zyv4.xw0371.com/2018/06/05/tMfUgoyyqtDz38qW/out00{}.ts'.format(1)\npath = '权力的游戏/{}.ts'.format(1)\nkv = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'}\nprint(url)\nr = requests.get(url, headers = kv, verify=False)\nwith open(path, 'wb') as f:\n    
f.write(r.content) \nprint('done{}'.format(1))\n\n","sub_path":"爬虫/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"207752687","text":"import requests\r\nfrom itertools import cycle\r\nfrom bs4 import BeautifulSoup\r\n\r\ndef get_proxies():\r\n url = 'https://www.sslproxies.org/'\r\n response = requests.get(url)\r\n soup = BeautifulSoup(response.text, features= \"html.parser\")\r\n print(response.text)\r\n proxies = []\r\n for i in soup.find('tbody').find_all('tr'):\r\n proxies.append(i.find('td').text)\r\n return proxies\r\n\r\n\r\n#If you are copy pasting proxy ips, put in the list below\r\n#proxies = ['121.129.127.206:80', '124.41.215.238:45169', '185.93.3.123:8080', '194.182.64.67:3128', '106.0.38.174:8080', '163.172.175.210:3128', '13.92.196.150:8080']\r\nwith open('http_proxies.txt') as f:\r\n proxies = f.read().splitlines()\r\n\r\nprint(proxies)\r\nproxy_pool = cycle(proxies)\r\n\r\nurl = 'http://icanhazip.com/'\r\nfor i in range(len(proxies)):\r\n #Get a proxy from the pool\r\n proxy = next(proxy_pool)\r\n print(\"Request #%d\"%i)\r\n try:\r\n request = requests.Session()\r\n request.proxies = {'http': proxy, 'https': proxy}\r\n response = request.get('https://www.pravda.com.ua/')\r\n\r\n print(response.text)\r\n except:\r\n #Most free proxies will often get connection errors. You will have retry the entire request using another proxy to work.\r\n #We will just skip retries as its beyond the scope of this tutorial and we are only downloading a single url\r\n print(\"Skipping. Connnection error\")","sub_path":"stonks/scripts/parser_proxy_rss/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"355897577","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api, _\nfrom datetime import datetime\nfrom email.utils import formataddr\nfrom odoo.exceptions import UserError\n\nclass AccountMove(models.Model):\n\n _name = 'account.move' \n _inherit = ['account.move', 'mail.thread']\n\n state = fields.Selection(track_visibility='always')\n ref = fields.Char(track_visibility='onchange')\n date = fields.Date(track_visibility='onchange') \n amount = fields.Monetary(compute='_amount_compute', store=True, track_visibility='onchange')\n \n @api.multi\n @api.depends('line_ids.debit', 'line_ids.credit')\n def _amount_compute(self):\n for move in self:\n total = 0.0\n for line in move.line_ids:\n total += line.debit\n move.amount = total\n \n @api.multi\n def button_cancel(self):\n res = super(AccountMove,self).button_cancel()\n self.create_mail_message(body='Cancel Journal Entry')\n return res\n \n @api.one\n def _get_default_from(self):\n if self.env.user.email:\n return formataddr((self.env.user.name, self.env.user.email))\n raise UserError(_(\"Unable to send email, please configure the sender's email address or alias.\"))\n \n @api.multi\n def create_mail_message(self, body):\n user = self.env.user\n for move in self:\n vals = {'type': 'notification',\n 'author_id': user.partner_id.id,\n 'date': datetime.now(),\n 'email_from': self._get_default_from(),\n 'model': 'account.move',\n 'res_id': move.id,\n 'subtype_id': 2,\n 'body': body}\n 
self.env['mail.message'].create(vals)","sub_path":"hr-new_branch/journal_entry_chatter/model/account_move.py","file_name":"account_move.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"59994939","text":"import sys\nfrom json import loads\nfrom shapely.geometry import Point, mapping\nfrom fiona import collection\nfrom fiona.crs import from_epsg\n\ndef write_shapefile(the_points):\n    \n    # parse the JSON payload and keep the station records\n    the_points_parsed = loads(the_points)\n    the_points_parsed = the_points_parsed['results']\n    \n    # attribute schema for the output shapefile\n    station_schema = { 'geometry': 'Point', \n    \n                       'properties': { \n                        \n                        'ELEV': 'str',\n                        'MINDATE': 'str', \n                        'MAXDATE': 'str', \n                        'LAT': 'str', \n                        'STANAME': 'str', \n                        'PCTCOV': 'str',\n                        'STAID': 'str',\n                        'ELEVUNIT': 'str',\n                        'LNG': 'str',\n                        } \n    \n    }\n    \n    # write one point feature per station record\n    with collection('some.shp', 'w', driver='ESRI Shapefile', schema=station_schema, crs=from_epsg(4326)) as output:\n        for record in the_points_parsed:\n            \n            # build the point geometry from the record coordinates\n            point = Point(float(record['longitude']), float(record['latitude']))\n            output.write({\n                \n                'properties': {\n                    'ELEV': record['elevation'], \n                    'MINDATE': record['mindate'], \n                    'MAXDATE': record['maxdate'], \n                    'LAT': record['latitude'], \n                    'STANAME': record['name'], \n                    'PCTCOV': record['datacoverage'], \n                    'STAID': record['id'], \n                    'ELEVUNIT': record['elevationUnit'], \n                    'LNG': record['longitude']\n                },\n                \n                'geometry': mapping(point)\n                \n            })\n            \nif __name__ == '__main__':\n    # minimal CLI: read the JSON payload from stdin\n    write_shapefile(sys.stdin.read())","sub_path":"util/write.py","file_name":"write.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"571352026","text":"from unittest.mock import MagicMock, patch\n\nimport numpy as np\nimport pytest\n\nfrom skrough.algorithms.hooks.finalize_hooks import finalize_hook_choose_objs_randomly\nfrom skrough.algorithms.key_names import (\n    VALUES_GROUP_INDEX,\n    VALUES_RESULT_OBJS,\n    VALUES_Y,\n    VALUES_Y_COUNT,\n)\nfrom skrough.dataprep import prepare_factorized_vector\nfrom skrough.structs.group_index import GroupIndex\nfrom skrough.structs.state import ProcessingState\n\n\n@pytest.mark.parametrize(\n    \"group_index, y, permutation, expected_objs\",\n    [\n        ([], [], [], []),\n        ([0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2]),\n        ([0, 0, 0, 1], [0, 1, 0, 2], [0, 1, 2, 3], [0, 2, 3]),\n        ([0, 0, 0, 1], [0, 1, 0, 2], [2, 1, 0, 3], [0, 2, 3]),\n        ([0, 0, 0, 1], [0, 1, 0, 2], [1, 0, 2, 3], [1, 3]),\n        ([0, 0, 1, 1], [0, 1, 0, 1], [0, 2, 1, 3], [0, 2]),\n        ([0, 0, 1, 1], [0, 1, 0, 1], [0, 3, 1, 2], [0, 3]),\n        ([0, 0, 1, 1], [0, 1, 0, 1], [1, 2, 0, 3], [1, 2]),\n        ([0, 0, 1, 1], [0, 1, 0, 1], [1, 3, 0, 2], [1, 3]),\n    ],\n)\n@patch(\"skrough.instances.get_permutation\")\ndef test_finalize_state_hook_choose_objs_random(\n    get_permutation_mock: MagicMock,\n    group_index,\n    y,\n    permutation,\n    expected_objs,\n    state_fixture: ProcessingState,\n):\n    get_permutation_mock.return_value = np.asarray(permutation)\n\n    group_index = GroupIndex.from_index(group_index)\n    y, y_count = prepare_factorized_vector(y)\n    state_fixture.values = {\n        VALUES_GROUP_INDEX: group_index,\n        VALUES_Y: y,\n        VALUES_Y_COUNT: y_count,\n    }\n    finalize_hook_choose_objs_randomly(state_fixture)\n    # is this a false positive unsubscriptable-object?\n    # pylint: disable-next=unsubscriptable-object\n    assert np.array_equal(state_fixture.values[VALUES_RESULT_OBJS], 
expected_objs)\n","sub_path":"tests/algorithms/hooks/test_finalize_state_hooks.py","file_name":"test_finalize_state_hooks.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"569877473","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\n\nAPP_ROOT = os.path.abspath(os.path.dirname(__file__))\n\nlibs_path = os.path.join(APP_ROOT, 'libs')\nif libs_path not in sys.path:\n    sys.path.insert(1, libs_path)\ndel libs_path\n\nDEBUG = True\nSECRET_KEY = '123456'\nTOKEN_SALT = '20140714'\n\nPAGINATION_LIMIT = 12\nMAX_IMAGE_CONTENT_LENGTH = 5 * 1024 * 1024\nALLOWED_IMAGE_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])\nUPLOAD_IMAGE_FOLDER = APP_ROOT + '/data/'\n\n# log to file\nLOG_SILENT = False\n\n# session\nSESSION_SALT = '123456'\nSESSION_COOKIE_NAME = 'sid'\nPERMANENT_SESSION_LIFETIME = 1209600 #14 days\n\n# routes exempt from CSRF protection\nIGNORE_CSRF_ROUTE = (\n    '/examples/login',\n    '/admin/login'\n)\n\n# services\nURLS = {\n    \n}\n\n# redis\nREDIS = {\n    'examples': {\n        'SESSION_COOKIE_NAME': 'examples_test',\n        #SESSION_COOKIE_DOMAIN = ''\n        'SESSION_COOKIE_PATH': '/',\n        'SESSION_COOKIE_HTTPONLY': True,\n        'SESSION_COOKIE_SECURE': False,\n        'PERMANENT_SESSION_LIFETIME': 1209600, #14 days\n        'SESSION_KEY_PREFIX': 'session:salon.examples:stage:',\n        'SESSION_REDIS_HOST': '127.0.0.1',\n        'SESSION_REDIS_PORT': 6379,\n        'SESSION_REDIS_DB': 0,\n        'SESSION_REDIS_PASSWORD': None\n    }\n}\n\n","sub_path":"configs/release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"545957826","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport sys\nimport os\n\nimport os.path as osp\n\nimport time\nimport yaml\nimport torch\n\nimport smplx\n\nfrom utils import JointMapper\nfrom cmd_parser import parse_config\nfrom data_parser import create_dataset\nfrom fit_single_frame import fit_single_frame\n\nfrom camera import create_camera\nfrom prior import create_prior\n\ntorch.backends.cudnn.enabled = False\n\n\ndef main(**args):\n    output_folder = args.pop('output_folder')\n    output_folder = osp.expandvars(output_folder)\n    if not osp.exists(output_folder):\n        os.makedirs(output_folder)\n\n    # Store the arguments for the current experiment\n    conf_fn = osp.join(output_folder, 'conf.yaml')\n    with open(conf_fn, 'w') as conf_file:\n        yaml.dump(args, conf_file)\n\n    result_folder = args.pop('result_folder', 'results')\n    result_folder = osp.join(output_folder, result_folder)\n    if not osp.exists(result_folder):\n        os.makedirs(result_folder)\n\n    #mesh_folder = args.pop('mesh_folder', 'meshes')\n    mesh_folder = './models/'\n\n    mesh_folder = osp.join(output_folder, mesh_folder)\n    if not osp.exists(mesh_folder):\n        os.makedirs(mesh_folder)\n\n    out_img_folder = osp.join(output_folder, 'images')\n    if not osp.exists(out_img_folder):\n        os.makedirs(out_img_folder)\n\n    float_dtype = args['float_dtype']\n    if float_dtype == 'float64':\n        dtype = torch.float64\n    elif float_dtype == 'float32':\n        dtype = torch.float32\n    else:\n        print('Unknown float type {}, exiting!'.format(float_dtype))\n        sys.exit(-1)\n\n    use_cuda = args.get('use_cuda', True)\n    if use_cuda and not torch.cuda.is_available():\n        print('CUDA is not available, exiting!')\n        sys.exit(-1)\n\n    img_folder = args.pop('img_folder', 'images')\n    dataset_obj = create_dataset(img_folder=img_folder, **args)\n\n    start = time.time()\n\n    #input_gender = 
args.pop('gender', 'neutral')\n input_gender = args.pop('gender', 'neutral')\n #gender_lbl_type = args.pop('gender_lbl_type', 'none')\n gender_lbl_type = 'none'\n #max_persons = args.pop('max_persons', -1)\n max_persons = -1\n\n float_dtype = args.get('float_dtype', 'float32')\n if float_dtype == 'float64':\n dtype = torch.float64\n elif float_dtype == 'float32':\n dtype = torch.float32\n else:\n raise ValueError('Unknown float type {}, exiting!'.format(float_dtype))\n\n joint_mapper = JointMapper(dataset_obj.get_model2data())\n\n #python3 smplifyx/main.py --config cfg_files/fit_smpl.yaml --data_folder ./input/sport --output_folder ./output --visualize=True --model_folder ./models --vposer_ckpt ../vposer_v1_0 --part_segm_fn smplx_parts_segm.pkl --interpenetration False --use_joints_conf False --use_face False --use_hands False\n\n model_params = dict(model_path=args.get('model_folder'),\n joint_mapper=joint_mapper,\n create_global_orient=True,\n create_body_pose=not args.get('use_vposer'),\n create_betas=True,\n create_left_hand_pose=True,\n create_right_hand_pose=True,\n create_expression=True,\n create_jaw_pose=True,\n create_leye_pose=True,\n create_reye_pose=True,\n create_transl=False,\n dtype=dtype,\n **args)\n\n female_model = smplx.create(gender='female', **model_params)\n\n # Create the camera object\n focal_length = args.get('focal_length')\n camera = create_camera(focal_length_x=focal_length,\n focal_length_y=focal_length,\n dtype=dtype,\n **args)\n\n if hasattr(camera, 'rotation'):\n camera.rotation.requires_grad = False\n\n use_hands = args.get('use_hands', False)\n use_face = args.get('use_face', False)\n\n body_pose_prior = create_prior(\n prior_type=args.get('body_prior_type'),\n dtype=dtype,\n **args)\n\n jaw_prior, expr_prior = None, None\n left_hand_prior, right_hand_prior = None, None\n\n rhand_args = args.copy()\n rhand_args['num_gaussians'] = args.get('num_pca_comps')\n right_hand_prior = create_prior(\n prior_type=args.get('right_hand_prior_type'),\n dtype=dtype,\n use_right_hand=True,\n **rhand_args)\n\n shape_prior = create_prior(\n prior_type=args.get('shape_prior_type', 'l2'),\n dtype=dtype, **args)\n\n angle_prior = create_prior(prior_type='angle', dtype=dtype)\n\n if use_cuda and torch.cuda.is_available():\n device = torch.device('cuda')\n\n camera = camera.to(device=device)\n female_model = female_model.to(device=device)\n body_pose_prior = body_pose_prior.to(device=device)\n angle_prior = angle_prior.to(device=device)\n shape_prior = shape_prior.to(device=device)\n\n else:\n device = torch.device('cpu')\n\n # A weight for every joint of the model\n joint_weights = dataset_obj.get_joint_weights().to(device=device,\n dtype=dtype)\n # Add a fake batch dimension for broadcasting\n joint_weights.unsqueeze_(dim=0)\n\n for idx, data in enumerate(dataset_obj):\n\n img = data['img']\n fn = data['fn']\n keypoints = data['keypoints']\n print('Processing: {}'.format(data['img_path']))\n\n curr_result_folder = osp.join(result_folder, fn)\n if not osp.exists(curr_result_folder):\n os.makedirs(curr_result_folder)\n curr_mesh_folder = osp.join(mesh_folder, fn)\n if not osp.exists(curr_mesh_folder):\n os.makedirs(curr_mesh_folder)\n for person_id in range(keypoints.shape[0]):\n if person_id >= max_persons and max_persons > 0:\n continue\n\n curr_result_fn = osp.join(curr_result_folder,\n '{:03d}.pkl'.format(person_id))\n curr_mesh_fn = osp.join(curr_mesh_folder,\n '{:03d}.obj'.format(person_id))\n\n curr_img_folder = osp.join(output_folder, 'images', fn,\n 
'{:03d}'.format(person_id))\n if not osp.exists(curr_img_folder):\n os.makedirs(curr_img_folder)\n\n\n gender = input_gender\n\n body_model = female_model\n\n out_img_fn = osp.join(curr_img_folder, 'output.png')\n\n fit_single_frame(img, keypoints[[person_id]],\n body_model=body_model,\n camera=camera,\n joint_weights=joint_weights,\n dtype=dtype,\n output_folder=output_folder,\n result_folder=curr_result_folder,\n out_img_fn=out_img_fn,\n result_fn=curr_result_fn,\n mesh_fn=curr_mesh_fn,\n shape_prior=shape_prior,\n expr_prior=expr_prior,\n body_pose_prior=body_pose_prior,\n left_hand_prior=left_hand_prior,\n right_hand_prior=right_hand_prior,\n jaw_prior=jaw_prior,\n angle_prior=angle_prior,\n **args)\n\n elapsed = time.time() - start\n time_msg = time.strftime('%H hours, %M minutes, %S seconds',\n time.gmtime(elapsed))\n print('Processing the data took: {}'.format(time_msg))\n\n\nif __name__ == \"__main__\":\n args = parse_config()\n main(**args)\n","sub_path":"smplify-x/smplifyx/get_smpl.py","file_name":"get_smpl.py","file_ext":"py","file_size_in_byte":7818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"194404270","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 28 17:07:22 2020\n\n@author: ahmet\n\"\"\"\n\nimport hashlib\n\nclass Block:\n def __init__(self, previous_hash, transaction):\n self.transactions = transaction\n self.previous_hash = previous_hash\n string_to_hash = \"\".join(transaction) + previous_hash\n self.block_hash = hashlib.sha256(string_to_hash.encode()).hexdigest()","sub_path":"Block.py","file_name":"Block.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"169352562","text":"from django.contrib import admin\r\n\r\nfrom ..models.review import Review\r\n\r\n\r\n@admin.register(Review)\r\nclass ReviewAdmin(admin.ModelAdmin):\r\n list_display = ('id', 'title', 'text', 'author', 'score', 'pub_date')\r\n list_filter = ('pub_date',)\r\n search_fields = ('title', 'text', 'author')\r\n","sub_path":"api/admin/review_admin.py","file_name":"review_admin.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"571090717","text":"\"\"\"\n\n\"\"\"\nimport logging\nimport datetime\nimport os\nimport json\nfrom typing import Optional\nfrom sklearn.metrics import accuracy_score, classification_report\nfrom tqdm import tqdm\nfrom typing import Dict\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\nfrom collections import defaultdict, namedtuple\nfrom discrete_nn.settings import model_path, checkpoint_path\nimport gc\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nCheckpoint = namedtuple(\"Checkpoint\", [\"parameters\", \"epoch\", \"date\", \"metrics\", \"train_data_set\",\n \"validation_data_set\", \"test_data_set\"])\n\n\nclass BaseModel(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.optimizer = None\n self.loss_funct = None\n\n def _epoch_eval_callback(self, validation_dataset: DataLoader) -> Optional[Dict]:\n \"\"\"\n method called by the train model method after the training step in each epoch. Allows custom subclasses\n to do additional evaluations and returns additional epoch stats. 
Used for logit models to include discretized\n evaluations\n :return: a dictionary with additional epoch metrics or None\n \"\"\"\n return None\n\n def _model_testing_callback(self, testing_dataset: DataLoader) -> Optional[Dict]:\n \"\"\"\n method called by the train model method when training is done and testing the model with the test dataset.\n Allows custom subclasses to do additional evaluations and returns additional epoch stats.\n Used for logit models to include discretized evaluations\n :return: a dictionary with additional epoch metrics or None\n \"\"\"\n return None\n\n def _evaluate(self, dataset_generator):\n \"\"\"\n Evaluates a method using dataset_generator\n :param dataset_generator: a sample generator\n\n :return: evaluation_loss, evaluation_acc, classification_report_dict\n \"\"\"\n self.eval()\n validation_losses = []\n targets = []\n predictions = []\n # disables gradient calculation since it is not needed\n with torch.no_grad():\n gc.collect()\n for batch_inx, (X, Y) in enumerate(dataset_generator):\n outputs = self(X)\n loss = self.loss_funct(outputs, Y)\n validation_losses.append(float(loss))\n predictions += torch.nn.functional.softmax(outputs, dim=1).argmax(dim=1).tolist()\n targets += Y.tolist()\n return self._gen_stats(targets, predictions, validation_losses)\n\n def evaluate_model(self, dataset: DataLoader) -> Dict:\n \"\"\"\n Evaluates the model with dataset\n :param dataset: the dataset to evaluate with\n :return:\n \"\"\"\n loss, acc, class_report_dict = self._evaluate(dataset)\n stats = defaultdict(list)\n stats[\"loss\"].append(loss)\n stats[\"acc\"].append(acc)\n stats[\"classification_report\"].append(class_report_dict)\n return stats\n\n def evaluate_and_save_to_disk(self, dataset, name):\n \"\"\"given a dataset extending Pytorch's Dataset class, and a name for the folder where the results\n will be placed, evaluates the network.\"\"\"\n stats = self.evaluate_model(dataset)\n self.save_to_disk(stats, name, False)\n\n @staticmethod\n def _gen_stats(targets, predictions, losses):\n \"\"\" generates basic training/evaluation information\"\"\"\n eval_loss = float(np.mean(losses))\n eval_acc = accuracy_score(targets, predictions)\n class_report_dict = classification_report(targets, predictions, output_dict=True)\n return eval_loss, eval_acc, class_report_dict\n\n def get_net_parameters(self):\n return self.state_dict()\n\n def set_net_parameters(self, param_dict):\n self.load_state_dict(param_dict, strict=False)\n\n def _train_epoch(self, dataset_generator):\n self.train()\n self.zero_grad()\n batch_loss_train = []\n targets = []\n predictions = []\n # training part of epoch\n for batch_inx, (X, Y) in enumerate(dataset_generator):\n gc.collect()\n self.optimizer.zero_grad() # reset gradients from previous iteration\n # do forward pass\n net_output = self(X)\n # compute loss\n loss = self.loss_funct(net_output, Y)\n # backward propagate loss\n loss.backward()\n self.optimizer.step()\n batch_loss_train.append(float(loss))\n predictions += torch.nn.functional.softmax(net_output, dim=1).argmax(dim=1).tolist()\n targets += Y.tolist()\n\n return self._gen_stats(targets, predictions, batch_loss_train)\n\n def save_to_disk(self, stats, name: str, save_model=True):\n \"\"\"Saves model's pickled class as pickle, the training metrics and a copy of the weight parameters as a pickle\n to disk\n :returns path to the folder containing the model and its metrics\"\"\"\n now = datetime.datetime.now()\n container_folder = os.path.join(model_path, name + 
f\"-{now.year}-{now.month}-{now.day}\"\n f\"--h{now.hour}m{now.minute}\")\n\n os.mkdir(container_folder)\n\n with open(os.path.join(container_folder, \"metrics.json\"), \"w\") as f:\n json.dump(stats, f)\n if save_model:\n model = self\n\n with open(os.path.join(container_folder, f\"{model.__class__.__name__}.pickle\"), \"wb\") as f:\n torch.save(model, f)\n\n with open(os.path.join(container_folder, f\"{model.__class__.__name__}.param.pickle\"), \"wb\") as f:\n torch.save(model.get_net_parameters(), f)\n\n return container_folder\n\n def save_checkpoint(self, epoch_number, metrics, training_data_set, validation_data_set, test_dataset,\n checkpoint_file_path):\n ckp = Checkpoint(epoch=epoch_number, parameters=self.get_net_parameters(), test_data_set=test_dataset,\n train_data_set=training_data_set, validation_data_set=validation_data_set,\n date=datetime.datetime.now().isoformat(), metrics=metrics)\n torch.save(ckp, checkpoint_file_path)\n\n def train_model(self, training_dataset, validation_dataset, test_dataset: DataLoader, epochs, model_name,\n evaluate_before_train: bool = False, continue_from_checkpoint: bool = True,\n checkpoint_frequency: int = 3):\n \"\"\"\n Trains the model with a training dataset and uses the validation_dataset to _evaluate it at every epoch\n :param training_dataset: generator for training data\n :param validation_dataset: ... for validation\n :param test_dataset: ... for test\n :param epochs: number of epochs to train for\n :param model_name: a name for the model (important for saving to disk)\n :param evaluate_before_train: if set, model will be evaluate before training (useful in the case of a logit\n model initialized with real weights). The untrained model will be saved to disk\n :param continue_from_checkpoint: if set will continue from checkpoint if it is found on disk\n :param checkpoint_frequency: the frequency at which checkpoints are saved (in epochs)\n :return: the path to the folder where metrics were saved\n \"\"\"\n stats = defaultdict(list)\n\n # getting device the models parameters are using\n device_net = list(self.get_net_parameters().values())[0].device\n\n start_epoch_inx = 0\n # check if there is a checkpoint\n model_save_folder = f\"{model_name}-trained\"\n checkpoint_full_path = os.path.join(checkpoint_path, f\"ckp_{model_name}.pickle\")\n if os.path.exists(checkpoint_full_path):\n ckp: Checkpoint = torch.load(checkpoint_full_path, map_location=\"cpu\")\n if continue_from_checkpoint:\n logger.info(f\"Found checkpoint for {model_name} dated {ckp.date} at epoch {ckp.epoch}.\"\n f\" Continuing from checkpoint.\")\n\n self.set_net_parameters({param_key: param_val.to(device_net) for param_key, param_val in\n ckp.parameters.items()})\n stats = ckp.metrics\n start_epoch_inx = ckp.epoch\n training_dataset = ckp.train_data_set\n test_dataset = ckp.test_data_set\n validation_dataset = ckp.validation_data_set\n logger.info(f\"Loading train/validation/test datasets from checkpoint to preserve class splits.\")\n else:\n logger.info(f\"Found checkpoint for {model_name} dated {ckp.date} at epoch {ckp.epoch}.\"\n f\" but cannot continue from checkpoint because continue_from_checkpoint is False\")\n else:\n logger.info(f\"Could not find checkpoint for {model_name}\")\n\n if evaluate_before_train and start_epoch_inx == 0:\n # should only evaluate if we are not loading from a checkpoint! 
Uses start epoch inx as a proxy for that\n eval_stats = defaultdict(list)\n test_loss, test_acc, test_class_report = self._evaluate(test_dataset)\n eval_stats[\"test_loss\"] = [test_loss]\n eval_stats[\"test_acc\"] = [test_acc]\n eval_stats[\"test_classification_report\"] = test_class_report\n test_callback = self._model_testing_callback(test_dataset)\n if test_callback is not None:\n eval_stats.update(test_callback)\n self.save_to_disk(eval_stats, f\"{model_name}-untrained\", save_model=False)\n\n for epoch_in in tqdm(range(start_epoch_inx, epochs), initial=start_epoch_inx, total=epochs,\n desc=\"Training Network. Epoch:\"):\n training_loss, training_acc, training_class_report = self._train_epoch(training_dataset)\n training_loss_post_update, training_acc_post_update, training_class_report_post_update = self._evaluate(\n training_dataset)\n # starting epochs evaluation\n validation_loss, validation_acc, validation_class_report = self._evaluate(validation_dataset)\n\n stats[\"training_loss\"].append(training_loss)\n stats[\"training_acc\"].append(training_acc)\n stats[\"training_classification_report\"].append(training_class_report)\n\n stats[\"training_loss_post_update\"].append(training_loss_post_update)\n stats[\"training_acc_post_update\"].append(training_acc_post_update)\n stats[\"training_classification_report_post_update\"].append(training_class_report_post_update)\n\n stats[\"validation_loss\"].append(validation_loss)\n stats[\"validation_acc\"].append(validation_acc)\n stats[\"validation_classification_report\"].append(validation_class_report)\n\n # calls subclasses callback so they can add any metric the wish\n val_callback = self._epoch_eval_callback(validation_dataset)\n tqdm.write(f\"val callback is {val_callback}-\")\n if val_callback is not None:\n for metric_name, metric_value in val_callback.items():\n stats[metric_name].append(metric_value)\n\n # saves checkpoint if needed\n if (epoch_in+1) % checkpoint_frequency == 0:\n # saves checkpoint\n self.save_checkpoint(epoch_in+1, stats, training_dataset, validation_dataset, test_dataset,\n checkpoint_full_path)\n\n tqdm.write(f\"epoch {epoch_in + 1}/{epochs}: \"\n f\"train loss: {training_loss:.4f} / \"\n f\"validation loss: {validation_loss:.4f} /\"\n f\"validation acc: {validation_acc} /\"\n f\"validation precision: {validation_class_report['weighted avg']['precision']} /\"\n f\"validation recall: {validation_class_report['weighted avg']['recall']} /\")\n\n test_loss, test_acc, test_class_report = self._evaluate(test_dataset)\n stats[\"test_loss\"] = [test_loss]\n stats[\"test_acc\"] = [test_acc]\n stats[\"test_classification_report\"] = test_class_report\n test_callback = self._model_testing_callback(test_dataset)\n if test_callback is not None:\n stats.update(test_callback)\n print(f\"test callback is {test_callback}-\")\n # removing checkpoint if any\n if os.path.exists(checkpoint_full_path):\n os.remove(checkpoint_full_path)\n return self.save_to_disk(stats, model_save_folder)\n\n\nclass LogitModel(BaseModel):\n\n def __init__(self):\n super().__init__()\n\n def _epoch_eval_callback(self, validation_dataset: DataLoader) -> Optional[dict]:\n sample_stats = self.evaluate_discretized_from_logit_models(\"sample\", validation_dataset, 10, None)\n argmax_stats = self.evaluate_discretized_from_logit_models(\"argmax\", validation_dataset, 1, None)\n stats = dict()\n stats[\"validation_loss_discrete_sample\"] = sample_stats[\"mean_loss\"]\n stats[\"validation_acc_discrete_sample\"] = sample_stats[\"mean_acc\"]\n 
stats[\"validation_loss_discrete_argmax\"] = argmax_stats[\"mean_loss\"]\n stats[\"validation_acc_discrete_argmax\"] = argmax_stats[\"mean_acc\"]\n return stats\n\n def _model_testing_callback(self, testing_dataset: DataLoader):\n sample_stats = self.evaluate_discretized_from_logit_models(\"sample\", testing_dataset, 10, None)\n argmax_stats = self.evaluate_discretized_from_logit_models(\"argmax\", testing_dataset, 1, None)\n stats = dict()\n stats[\"test_loss_discrete_sample\"] = sample_stats[\"mean_loss\"]\n stats[\"test_acc_discrete_sample\"] = sample_stats[\"mean_acc\"]\n stats[\"test_loss_discrete_argmax\"] = argmax_stats[\"mean_loss\"]\n stats[\"test_acc_discrete_argmax\"] = argmax_stats[\"mean_acc\"]\n return stats\n\n def generate_discrete_networks(self, method: str) -> BaseModel:\n raise NotImplementedError\n\n def evaluate_discretized_from_logit_models(self, discretization_method: str, dataset: DataLoader,\n num_trials: int, result_save_path):\n \"\"\"\n Given a logit model (such as a ternary one), generates a discrete one from it using the provided\n discretization method and evaluates it with dataset.\n\n :param discretization_method: e.g. sample\n :param dataset: the dataset to evaluate the discrete model with\n :param num_trials: the number of independent evaluations (discretizing the model again everytime)\n :param result_save_path: the path to save results to. If None does not save\n :return: a copy of the metrics dictionary\n \"\"\"\n # gets device being used for dataset by looking into dataset used by dataloader\n device = dataset.dataset[0][0].device\n\n results = []\n for i in range(num_trials):\n # discretizes\n disc_model = self.generate_discrete_networks(discretization_method)\n disc_model = disc_model.to(device)\n stats = disc_model.evaluate_model(dataset)\n results.append(stats)\n mean_loss = sum([result[\"loss\"][0] for result in results]) / num_trials\n mean_acc = sum([result[\"acc\"][0] for result in results]) / num_trials\n\n stats = {\"trials\": results, \"mean_loss\": mean_loss, \"mean_acc\": mean_acc}\n if result_save_path is not None:\n if not os.path.exists(result_save_path):\n os.mkdir(result_save_path)\n with open(os.path.join(result_save_path, \"metrics.json\"), \"w\") as f:\n json.dump(stats, f)\n return stats\n\n\nclass ForcedQuantizationBaseModel(BaseModel):\n \"\"\"\n This class is the base implementation of generic models which apply a discretization step during training\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def discretize(self):\n \"\"\"Discretizes a model's weights. Model dependent\"\"\"\n raise NotImplementedError\n\n def train_model(self, training_dataset, validation_dataset, test_dataset, epochs, model_name,\n evaluate_before_train: bool = False):\n \"\"\"\n Trains the model with a training dataset and uses the validation_dataset to evaluate it at every epoch. Results\n are saved to disk.\n :param training_dataset: generator for training data\n :param validation_dataset: ... for validation\n :param test_dataset: ... for test\n :param epochs: number of epochs to train for\n :param model_name: a name for the model (important for saving to disk)\n :param evaluate_before_train: if set, model will be evaluate before training (useful in the case of a logit\n model initialized with real weights). 
The untrained model will be saved to disk\n :return:\n \"\"\"\n if evaluate_before_train:\n eval_stats = defaultdict(list)\n test_loss, test_acc, test_class_report = self._evaluate(test_dataset)\n eval_stats[\"test_loss\"] = [test_loss]\n eval_stats[\"test_acc\"] = [test_acc]\n eval_stats[\"test_classification_report\"] = test_class_report\n self.save_to_disk(eval_stats, f\"{model_name}-untrained\")\n stats = defaultdict(list)\n\n for epoch_in in tqdm(range(epochs), desc=\"Training Network. Epoch:\"):\n training_loss, training_acc, training_class_report = self._train_epoch(training_dataset)\n # call discretization method\n with torch.no_grad():\n self.discretize()\n # starting epochs evaluation\n validation_loss, validation_acc, validation_class_report = self._evaluate(validation_dataset)\n training_loss_post_update, training_acc_post_update, training_class_report_post_update = self._evaluate(\n training_dataset)\n\n stats[\"training_loss\"].append(training_loss)\n stats[\"training_acc\"].append(training_acc)\n stats[\"training_classification_report\"].append(training_class_report)\n\n stats[\"training_loss_post_discretize\"].append(training_loss_post_update)\n stats[\"training_acc_post_discretize\"].append(training_acc_post_update)\n stats[\"training_classification_report_post_discretize\"].append(training_class_report_post_update)\n\n stats[\"validation_loss_post_discretize\"].append(validation_loss)\n stats[\"validation_acc_post_discretize\"].append(validation_acc)\n stats[\"validation_classification_report_post_discretize\"].append(validation_class_report)\n\n print(f\"epoch {epoch_in + 1}/{epochs}: \"\n f\"train loss: {training_loss:.4f} / \"\n f\"validation loss: {validation_loss:.4f} /\"\n f\"validation acc: {validation_acc} /\"\n f\"validation precision: {validation_class_report['weighted avg']['precision']} /\"\n f\"validation recall: {validation_class_report['weighted avg']['recall']} /\")\n\n test_loss, test_acc, test_class_report = self._evaluate(test_dataset)\n stats[\"test_loss\"] = [test_loss]\n stats[\"test_acc\"] = [test_acc]\n stats[\"test_classification_report\"] = test_class_report\n self.save_to_disk(stats, f\"{model_name}-trained\")\n","sub_path":"discrete_nn/models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":19449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"366072226","text":"\"\"\"\nThe program implements the algorithm for Harris Corner Detection, Daubechies D4 Wavelet Transform\nand Speeded-Up Robust Features (SURF). 
Also, it provides the code to match similar features (obtained\nusing Harris Corner Detection or SURF) between two images.\n\n@author Aditya Pulekar, Chirag Kular\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2,math,random\ncount=1\n\ndef HOG(img, x, y):\n \"\"\"\n Generate a descriptor around a given point in an image\n :param img: the image to describe\n :param x: the x value of the point to be described\n :param y: the y value of the point to be described\n :return: the [1, 128] descriptor of the point\n \"\"\"\n #TODO: write a HOG descriptor here\n des=[]\n row=0\n sub_image = img[x-8:x+8,y-8:y+8]\n while row < len(sub_image):\n col=0\n while col < len(sub_image[0]):\n temp_vector = [0 for i in range(8)]\n new_subimage = sub_image[row:row+4,col:col+4]\n x_gradient = cv2.Sobel(new_subimage,ddepth=-1,dx=1,dy=0)\n y_gradient = cv2.Sobel(new_subimage,ddepth=-1,dx=0,dy=1)\n theta = np.empty([x_gradient.shape[0],x_gradient.shape[1]])\n for i in range(len(x_gradient)):\n for j in range(len(x_gradient[0])):\n if x_gradient[i,j] == 0:\n theta[i,j] = 90\n else:\n theta[i,j] = np.arctan(y_gradient[i,j]/x_gradient[i,j])*(180/np.pi)\n theta_iter = theta.flatten() #To avoid nested for loops for 4x4 theta\n for i in range(len(theta_iter)):\n if theta_iter[i] < 45:\n temp_vector[0]=temp_vector[0]+1\n elif theta_iter[i] >= 45 and theta_iter[i] < 90:\n temp_vector[1]=temp_vector[1]+1\n elif theta_iter[i] >= 90 and theta_iter[i] < 135:\n temp_vector[2]=temp_vector[2]+1\n elif theta_iter[i] >= 135 and theta_iter[i] < 180:\n temp_vector[3]=temp_vector[3]+1\n elif theta_iter[i] >= 180 and theta_iter[i] < 225:\n temp_vector[4]=temp_vector[4]+1\n elif theta_iter[i] >= 225 and theta_iter[i] < 270:\n temp_vector[5]=temp_vector[5]+1\n elif theta_iter[i] >= 270 and theta_iter[i] < 315:\n temp_vector[6]=temp_vector[6]+1\n elif theta_iter[i] >= 315 and theta_iter[i] < 360:\n temp_vector[7]=temp_vector[7]+1\n des.extend(temp_vector)\n col=col+4\n row=row+4\n return des\n\n\ndef sumOfSquares(f1,f2):\n sum1=0;sum2=0\n for i in f1:\n sum1=sum1+math.pow(i,2)\n for i in f2:\n sum2=sum2+math.pow(i,2)\n rootOfSquare1 = math.sqrt(sum1)\n rootOfSquare2 = math.sqrt(sum2)\n return [rootOfSquare1,rootOfSquare2]\n\n\ndef matcher(features1, features2):\n \"\"\"\n Matches the descriptors from one image to the\n descriptors from another image\n :param features1: the first array of features [n, 128]\n :param features2: the second array of features [n, 128]\n :return: matching point pairs [[index1, index2], ... 
]\n \"\"\"\n #TODO: write a matching function\n #Performing the L2-Norm\n new_features1=[]\n new_features2=[]\n for itr in range(5):\n [rootOfSquare1,rootOfSquare2] = sumOfSquares(features1[itr],features2[itr])\n new_features1.append(np.array(features1[itr])/rootOfSquare1)\n new_features2.append(np.array(features2[itr])/rootOfSquare2)\n indices = []\n for itr in range(len(new_features1)):\n findMinDist=[]\n #findMaxCosineVal=[]\n for itr2 in range(len(new_features2)):\n f1 = new_features1[itr]\n f2 = new_features2[itr2]\n\n #For evaluating the cosine similarity\n # [rootOfSquare1,rootOfSquare2] = sumOfSquares(f1,f2)\n # numerator = np.array(f1)*np.array(f2)\n # numeratorSum = sum(numerator)\n # denominator = rootOfSquare1*rootOfSquare2\n # cosine = np.divide(numeratorSum,denominator)\n # findMaxCosineVal.append(cosine)\n\n #For evaluating the similarity based on euclidean distance\n Dist = np.array(f1) - np.array(f2)\n sum=0\n for i in Dist:\n sum=sum+math.pow(i,2)\n rootOfSum = math.sqrt(sum)\n findMinDist.append(rootOfSum)\n bestMatch = findMinDist.index(min(findMinDist))\n indices.append([itr,bestMatch])\n return indices\n\n#Matches similar SURF features between two images\n# def surfMatcher(img1_color,img2_color):\n# s = cv2.SURF(450)\n# keyPt1,descp1 = s.detectAndCompute(img1_color,None)\n# new_Img = cv2.drawKeypoints(img1_color,keyPt1,None,(0,255,0))\n# plt.title(\"SURF Features for aerial1\")\n# cv2.imwrite(\"SURF_Features_Aerial1.jpg\",new_Img)\n# plt.imshow(new_Img)\n# plt.show()\n# keyPt2,descp2 = s.detectAndCompute(img2_color,None) #\"None\" has been given for the mask\n# new_Img = cv2.drawKeypoints(img2_color,keyPt2,None,(0,255,0))\n# plt.title(\"SURF Features for aerial2\")\n# cv2.imwrite(\"SURF_Features_Aerial2.jpg\",new_Img)\n# plt.imshow(new_Img)\n# plt.show()\n#\n# #Concatenate two images\n# out_image = concatenateTwoImages(img1_color,img2_color)\n# c1 = img1_color.shape[1]\n# m = np.amax(out_image.flatten())\n# cv2.imshow(\"Concatenated Image for SURF\",out_image/m)\n# cv2.waitKey(0)\n#\n# bf_match = cv2.BFMatcher(cv2.NORM_L2,crossCheck=False)\n# # match_pts= bf_match.knnMatch(np.asarray(descp1,np.float32),np.asarray(descp2,np.float32),1)\n# match_pts= bf_match.match(np.asarray(descp1,np.float32),np.asarray(descp2,np.float32))\n# match_pts_sorted = sorted(match_pts,reverse=True) #Why do we get better on taking the higher values?\n# #Peformance of surf changes every time we run the program\n# #Drawing matches for SURF\n# for itr in match_pts_sorted[:5]:\n# indexForI2 =itr.trainIdx\n# indexForI1 = itr.queryIdx\n# (ptX2,ptY2)=tuple(keyPt2[indexForI2].pt) #(X--> Columns, Y--> Rows)\n# (ptX1,ptY1)=tuple(keyPt1[indexForI1].pt)\n# cv2.circle(out_image,(int(ptX2)+c1,int(ptY2)),10,(0,255,0),thickness=3)\n# cv2.circle(out_image,(int(ptX1),int(ptY1)),10,(0,255,0),thickness=3)\n# cv2.line(out_image,(int(ptX1),int(ptY1)),(int(ptX2)+c1,int(ptY2)),(0,255,0),thickness=2)\n# cv2.imshow(\"Concatenated Image for SURF (With final key points)\",out_image/m)\n# cv2.imwrite(\"SURF_Features.jpg\",out_image)\n# cv2.waitKey(0)\n\n\ndef surfMatcher(img1_color):\n s = cv2.SURF(450)\n keyPt1,descp1 = s.detectAndCompute(img1_color,None)\n # new_Img = cv2.drawKeypoints(img1_color,keyPt1,None,(0,255,0))\n\n # Choosing limited Interest points from the ones we have got\n ra1=[];new_des=[]\n\n #Note: We are randomly taking 100 feature points from the ones detected by SURF\n for itr in range(100):\n while(True):\n a=random.randint(0,len(descp1)-1)\n if a not in ra1:\n ra1.append(a)\n break\n 
new_des.append(descp1[a])\n return new_des\n\n\ndef concatenateTwoImages(img1_color,img2_color):\n r2=img2_color.shape[0];c2=img2_color.shape[1]\n r1=img1_color.shape[0];c1=img2_color.shape[1]\n out_image=np.empty([max(r1,r2),c2+c1,3],dtype=np.float32)\n out_image[:r1,:c1]=np.dstack([img1_color])\n out_image[:r2,c1:]=np.dstack([img2_color])\n return out_image\n\ndef daubechies(img):\n FeatureVector=[]\n details=img.shape\n l=details[1]\n for rows in range(details[0]):\n if l>=4:\n mid=l//2;i=0;j=0\n #Scaling coeff\n h0=0.4829;h1=0.8365;h2=0.22414;h3=-0.1294\n #Wavelet coeff\n g0=-0.1294;g1=-0.22414;g2=0.8365;g3=-0.4829\n temp=[0 for itr in range(l)]\n while(jthreshForCorner) #This is non-maximum suppression (Though, we don't really suppress anything)\n #NOTE: Too many circles are drawn (Though, the result is right. Best option is to choose the best features)\n #Keep the threshold for corners just one lesser than the max value\n # for col in range(len(CR[0])):\n # cv2.circle(colored_img,(CR[1][col],CR[0][col]),5,(0,255,0))\n\n # NOTE: All the interest points highlighted in this image represent the best iinterest points. Hence, we may directly take the top\n # 5-10 points out of these.\n # cv2.imshow(\"Interest Points \"+str(count),colored_img)\n # cv2.waitKey(0)\n count+=1\n return CR\n\ndef generateHOG(img1_color,img1):\n IP_aerial1=harrisCorner(img1_color)\n # print(\"Interest points generated for the image....\")\n descp1=[]\n # Choosing limited Interest points from the ones we have got ()\n ra1=[];kp1=[];\n for itr in range(100):\n while(True):\n a=random.randint(0,len(IP_aerial1[0])-1)\n if a not in ra1:\n ra1.append(a)\n break\n kp1.append([IP_aerial1[1][a],IP_aerial1[0][a]]) #NOTE: We are putting in the 1st index and then the 0th index\n\n # print \"100 keypoints created....Time to create HoG descp\"\n\n #Since we are taking 100 random interest points.\n for itr in range(100):\n descp1.append(HOG(img1,kp1[itr][1],kp1[itr][0]))\n return descp1\n\n\n#MAIN() FUNCTION WAS PROVIDED FOR TESTING THIS PROGRAM\n# def main():\n# #**************HARRIS CORNERS AND HOG STARTS************************\n# img1_color = cv2.imread(\"aerial1.jpg\",1)\n# # img2_color = cv2.imread(\"aerial2.jpg\",1)\n# # cv2.imshow(\"Aerial1\", img1_color)\n# # cv2.waitKey(0)\n# # cv2.imshow(\"Aerial2\", img2_color)\n# # cv2.waitKey(0)\n# img1=cv2.cvtColor(img1_color,cv2.COLOR_BGR2GRAY)\n# # img2=cv2.cvtColor(img2_color,cv2.COLOR_BGR2GRAY)\n#\n# # generateHOG(img1_color,img1,img2_color,img2)\n#\n# generateHOG(img1_color,img1)\n#\n# #**************HARRIS CORNERS AND HOG ENDS************************ (We will now be feeding the descriptors to SVM)\n# # indices = matcher(descp1, descp2)\n# # # #TODO: display the output in a meaningful way\n# # #Concatenating two images\n# # coloredFeatureImage = concatenateTwoImages(img1_color,img2_color)\n# # color_m = np.amax(coloredFeatureImage.flatten())\n# # coloredFeatureImage/=color_m\n# # dictForPoints={}\n# # # for r in range(coloredFeatureImage.shape[0]):\n# # # for c in range(coloredFeatureImage.shape[1]):\n# # # if [c,r] in kp1:\n# # # cv2.circle(coloredFeatureImage,(c,r),10,(0,255,0),thickness=3)\n# # # dictForPoints[(c,r)] = (c,r)\n# # # if [c-img2.shape[1],r] in kp2:\n# # # cv2.circle(coloredFeatureImage,(c,r),10,(0,255,0),thickness=3)\n# # # dictForPoints[(c-img2.shape[1],r)] = (c,r)\n# # cv2.imshow(\"Feature Matching Image with circles (HoG)\",coloredFeatureImage)\n# # cv2.imwrite(\"HoG_Features_highlighted.jpg\",coloredFeatureImage*color_m)\n# # cv2.waitKey(0)\n# # #\n# # # 
for itr in indices:\n# # # if itr[0] == itr[1]:\n# # # cv2.line(coloredFeatureImage,dictForPoints[tuple(kp1[itr[0]])],dictForPoints[tuple(kp2[itr[1]])],(0,255,0),thickness=2)\n# # # else:\n# # # cv2.line(coloredFeatureImage,dictForPoints[tuple(kp1[itr[0]])],dictForPoints[tuple(kp2[itr[1]])],(0,0,255),thickness=2)\n# # # cv2.imshow(\"Feature Matching Image with lines (HoG)\",coloredFeatureImage)\n# # # cv2.imwrite(\"HoG_Features_matched.jpg\",coloredFeatureImage*color_m)\n# # # cv2.waitKey(0)\n# # #\n# # # #Comparison of HoG with SURF\n# # # surfMatcher(img1_color,img2_color)\n# *************************************************************************\n#\n# #**************DAUBECHIES D4 WAVELET TRANSFORM STARTS**********************\n# #Taking the gray-scale image\n# print \"Before Daubechies D4 transform: \\n\", img1\n# daubechies(img1)\n# print \"\\nAfter taking the Daubechies wavelet transform......\"\n# print img1\n# **************DAUBECHIES D4 WAVELET TRANSFORM ENDS**********************\n\n\n# if __name__ == '__main__':\n# main()","sub_path":"allFeatures.py","file_name":"allFeatures.py","file_ext":"py","file_size_in_byte":13781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"159422985","text":"\"\"\"\n @description: 来源:rohm\n 商城品牌:罗姆半导体\n 目标类目:DDR-SDRAM用线性稳压器\n 商城类目:线性稳压器\n 来源网址:http://www.rohm.com.cn/web/china/search/parametric/-/search/Linear%20Regulators%20for%20DDR-SDRAM\n @author: RoyalClown\n @date: 2016/11/18\n\"\"\"\nfrom DataAnalyse.dbDataGet.Util_data import DataProcessing\nfrom DataAnalyse.file_download.img_download import ImgDownload\nfrom DataAnalyse.file_download.pdf_download import PdfDownload\nfrom Lib.DBConnection.OracleConnection import OracleConnection\nfrom Spider.Rohm.VoltageRegulator.DDR_SDRAM.saveAndGo import all_go\n\n\nclass CCT2016111800000016:\n def __init__(self):\n self.url = \"http://www.rohm.com.cn/web/china/search/parametric/-/search/Linear%20Regulators%20for%20DDR-SDRAM\"\n self.task_code = \"CCT2016111800000016\"\n self.task_id = self.get_task_id()\n\n def get_task_id(self):\n orcl_conn = OracleConnection()\n cursor = orcl_conn.conn.cursor()\n cursor.execute(\n \"select cct_id from product$component_crawl_task where cct_taskid='{}'\".format(self.task_code))\n task_id = cursor.fetchone()[0]\n cursor.close()\n return task_id\n\n def go(self):\n # print(\"第一步开始进行爬取\")\n # all_go(task_code=self.task_code, task_id=self.task_id)\n # print(\"成功完成爬取数据到爬虫数据表\\n------------------现在开始下载pdf、img文件-----------------\")\n\n # pdf_download = PdfDownload(self.task_id)\n # pdf_download.go()\n # img_download = ImgDownload()\n # img_download.go()\n # print(\"pdf、img下载完成,开始对数据进行分析并存入数据库\")\n\n data_processing = DataProcessing()\n data_processing.go(self.task_id)\n\n\nif __name__ == \"__main__\":\n taskn = CCT2016111800000016()\n taskn.go()\n","sub_path":"StandardSpider/SpiderRulesMap/Rohm/CCT2016111800000016.py","file_name":"CCT2016111800000016.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"236827406","text":"def WCgetchar(file_name):\r\n f = open(file_name, \"r\")\r\n return len(f.read())\r\ndef WCgetline(file_name):\r\n f = open(file_name, \"r\")\r\n read = f.readlines()#以行为单位读取文本并存入列表\r\n return len(read)\r\nimport re\r\ndef WCgetword(file_name):\r\n f = open(file_name, \"r\")\r\n read = re.split(r'[^a-zA-Z]+', f.read())\r\n return len(read)\r\n\r\ndef main():\r\n str , name = 
input(\"输入命令符和文件路径(以空格分开):\\n\").split()\r\n if str == '-c':\r\n print('字符数:', WCgetchar(name))\r\n elif str == '-w':\r\n print('单词数:', getword(name))\r\n elif str == '-l':\r\n print('行数:', WCgetline(name))\r\n\r\n\r\nmain()","sub_path":"wc.py","file_name":"wc.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"347320100","text":"from django import template\n\nregister = template.Library()\n\n@register.simple_tag\ndef drawmenu(format_string,request):\n from .model import Node\n tree = Node.object.filter(r_lt = request.GET.get('menupk'))\n # загрузить темплейт, передав контекст MTT\n\n return ''","sub_path":"Django2/eventakte/city/templates/city/templatetags/draw_menu.py","file_name":"draw_menu.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"443239792","text":"import pytest\nfrom icevision.all import *\n\n\ndef test_bbox_simple():\n bbox = BBox.from_xyxy(1, 2, 3, 4)\n\n assert bbox.xyxy == [1, 2, 3, 4]\n assert bbox.yxyx == [2, 1, 4, 3]\n assert bbox.xywh == [1, 2, 2, 2]\n\n\ndef test_bbox_relative_xcycwh():\n w, h = 640, 480\n xcycwh = [0.7, 0.2, 0.1, 0.2]\n bbox = BBox.from_relative_xcycwh(*xcycwh, img_width=w, img_height=h)\n assert bbox.xyxy == [416, 48, 480, 144]\n assert bbox.relative_xcycwh(img_width=w, img_height=h) == pytest.approx(xcycwh)\n\n\ndef test_bbox_invalid_data_error():\n with pytest.raises(InvalidDataError) as e:\n BBox.from_xyxy(10, 20, 20, 20)\n","sub_path":"tests/core/test_bbox.py","file_name":"test_bbox.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"172537848","text":"import cv2\r\nimport numpy as np\r\nfrom hsv_histo import ostu\r\nimg = cv2.imread('C:/Users/zhang/OneDrive/Desktop/Coding/Project/sample/2.jpg')\r\ngblur=cv2.GaussianBlur(img,(9,9),0)\r\ngrey=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\ncanny1=cv2.Canny(grey, 50, 150)\r\nres=cv2.resize(img,(480,640),interpolation=cv2.INTER_LINEAR)\r\nhsv = cv2.cvtColor(res, cv2.COLOR_BGR2HSV)\r\nlower_hsv = np.array([0,43,45])\r\nupper_hsv = np.array([10,255,255])\r\nmask = cv2.inRange(hsv,lower_hsv,upper_hsv)\r\nkernel = np.ones((3, 3), np.uint8)\r\nopening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\r\nclosing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\r\ncanny=cv2.Canny(closing, 50, 150)\r\n#circles = cv2.HoughCircles(canny1, cv2.HOUGH_GRADIENT,1, 50, param1=60,param2=30,minRadius=15,maxRadius=55)\r\ncircles = cv2.HoughCircles(canny, cv2.HOUGH_GRADIENT,1, 60, param1=60,param2=10,minRadius=15,maxRadius=40)\r\ncircles = np.uint16(np.around(circles))\r\n# for i in circles[0,:]:\r\n# # draw the outer circle\r\n# cv2.circle(img,(i[0],i[1]),i[2],(255,255,255),2)\r\n# # draw the center of the circle\r\n# cv2.circle(img,(i[0],i[1]),2,(0,0,255),3)\r\n# cv2.imshow('detected c',img)\r\n# cv2.imshow('detected ',mask)\r\n# cv2.imshow('detected a ',canny)\r\n# cv2.imwrite(\"5.jpg\",opening)\r\n# cv2.waitKey(0)\r\n# cv2.destroyAllWindows()\r\n\r\n\r\nROIs=[]\r\ni=0\r\nfor circle in circles[0,:]:\r\n x, y, r = circle\r\n col_t=int(y-r)\r\n if col_t <0:\r\n col_t=0\r\n col_b=int(y+r)\r\n if col_b >480:\r\n col_b=480\r\n row_t=int(x-r)\r\n if row_t <0:\r\n row_t=0\r\n row_b=int(x+r)\r\n if row_b >640:\r\n row_b=640\r\n\r\n ROI=mask[col_t: col_b,row_t: row_b]\r\n cv2.imwrite(\"result\" + str(i) + \".jpg\", ROI)\r\n 
re=cv2.imread(\"result\"+str(i)+\".jpg\",0)\r\n i += 1\r\n _,_,ratio=ostu(re)\r\n if 0.19 < ratio < 0.3:#调整这个范围\r\n ROIs.append(circle)\r\n print(\"YES Ratio:\"+str(ratio))\r\n else:\r\n print(\"NO Ratio:\"+str(ratio))\r\n\r\nfor i in ROIs:\r\n # draw the outer circle\r\n cv2.circle(img,(i[0],i[1]),i[2],(255,255,255),2)\r\n # draw the center of the circle\r\n cv2.circle(img,(i[0],i[1]),2,(0,0,255),3)\r\n\r\ncv2.imshow('detected c',img)\r\ncv2.imshow('detected ',mask)\r\ncv2.imshow('detected a ',canny)\r\ncv2.imwrite(\"5.jpg\",opening)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"303941255","text":"T = int(input())\r\nclass abc:\r\n def inp(n1):\r\n l=[]\r\n for i in range(0,n1):\r\n item=input()\r\n l.append(item)\r\n return l\r\n\r\n def check(lst):\r\n l = len(lst)\r\n i = 0\r\n while i < l - 1 and lst[i] >= lst[i + 1]:\r\n i += 1\r\n while i < l - 1 and lst[i] <= lst[i + 1]:\r\n i += 1\r\n return \"Yes\" if i == l - 1 else (\"No\")\r\nl1=[]\r\nfor i in range(0,T):\r\n n = int(input())\r\n item2=abc.inp(n)\r\n l1.append(item2)\r\n print(l1)\r\n for j in range(0,len(l1)):\r\n x=abc.check(l1[j])\r\n print(x)","sub_path":"bgh.py","file_name":"bgh.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"305178556","text":"# # Importing Packages\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import figure\nimport numpy as np\nimport shutil\nimport imageio\nimport os\nimport natsort\nfrom natsort import natsorted\nfrom datetime import datetime, date, time\nimport scipy.io\nfrom scipy.io import loadmat\nfrom decimal import Decimal\nimport ast\n\n####################################################################################################\n\nreactor = input(\"Reactor? \")\n\nif reactor == \"MSRE\":\n fuel_type = input(\"LEU or HEU? \")\n if fuel_type == \"LEU\":\n heavy_metal = 0.013603741 #MTHM\n if fuel_type == \"HEU\":\n heavy_metal = 0.028277628\nelif reactor == \"MSBR\":\n heavy_metal = 58.58689935\nelif reactor == \"MSDR\":\n heavy_metal = 62.1 \n \n#efficiencies = natsorted([1, 0.8, 0.6, 0.4, 0.2, 0.15, 0.10, 0.05, 0.0])\n#efficiencies = natsorted([0.01, 0.02, 0.04, 0.08, 0.16, 0.32, 0.64])\nefficiencies = natsorted([0.03, 0.05, 0.07, 0.09, 0.11, 0.13, 0.15, 0.17, 0.33, 0.65])\neff = natsorted(list(((np.array(efficiencies) * 100).astype(int)).astype(str)))\nprint(natsorted(efficiencies))\nprint(natsorted(eff))\n\nplot_all = input(\"Plot all isotopes? \")\nif plot_all == \"Y\":\n print(\"Ok\")\nelse:\n isotopes = input(\"Provide list of isotopes to plot: \")\n isotopes = ast.literal_eval(isotopes)\n \nplot_solid = input(\"Plot isotopes in the solid_trap system? 
\")\n\n\n#####################################################################################################\n\nplt_directory = \"./Apollo/\" + reactor \n\nprint(\"Searching directory for .plt files...\")\nnames = []\nfor root, dirs, files in os.walk(plt_directory):\n for file in files:\n if file.endswith(\".plt\"):\n #print(os.path.join(root, file))\n names.append(os.path.join(root, file))\n\nif not names:\n print(\"List is empty\")\n\nnames = natsorted(names)\nnames\n\n##############################################################################################\n##############################################################################################\n################################## PLOTTING OPUS FILES #######################################\n##############################################################################################\n##############################################################################################\n\nx = 0 #Index for names\ny = 0 #Index for eff\n\n\nfor i in range(0, len(eff)):\n print(eff[y])\n \n\n # Reads .plt files and creates a dataframe, values are multiplied by heavy_metal\n print(\"Importing Data...\")\n fuel_salt = pd.read_csv(names[x], sep=\"\\s+\", skiprows=5)\n fuel_salt = fuel_salt.T\n fuel_salt.reset_index(level=0, inplace=True)\n fuel_salt = fuel_salt.rename(columns = {'index':'time'})\n fuel_salt.iloc[:,1:] = fuel_salt.iloc[:,1:].mul(heavy_metal)\n print(\"Finish importing fuel salt data...\")\n\n off_gas = pd.read_csv(names[x+1], sep=\"\\s+\", skiprows=5)\n off_gas = off_gas.T\n off_gas.reset_index(level=0, inplace=True)\n off_gas = off_gas.rename(columns = {'index':'time'})\n off_gas.iloc[:,1:] = off_gas.iloc[:,1:].mul(heavy_metal)\n print(\"Finish importing off_gas data...\")\n\n solid_trap = pd.read_csv(names[x+2], sep=\"\\s+\", skiprows=5)\n solid_trap = solid_trap.T\n solid_trap.reset_index(level=0, inplace=True)\n solid_trap = solid_trap.rename(columns = {'index':'time'})\n solid_trap.iloc[:,1:] = solid_trap.iloc[:,1:].mul(heavy_metal)\n print(\"Finish importing solid trap data...\")\n\n # Creates directory to store CSV files\n csv_path = (\"./\" + reactor + \"/OPUS/csv/\") #ADD REACTOR_BETA_{} WHERE {} IS EFFICIENCY\n if not os.path.exists(csv_path):\n os.makedirs(csv_path)\n\n fuel_salt[\"time\"] = fuel_salt[\"time\"].astype(\"float\")\n off_gas[\"time\"] = off_gas[\"time\"].astype(\"float\")\n solid_trap[\"time\"] = solid_trap[\"time\"].astype(\"float\")\n\n fuel_salt[\"time\"] = fuel_salt[\"time\"].astype(\"int\")\n off_gas[\"time\"] = off_gas[\"time\"].astype(\"int\")\n solid_trap[\"time\"] = solid_trap[\"time\"].astype(\"int\")\n \n # Saves dataframes as CSV's\n fuel_salt.to_csv((csv_path + \"salt_comp_\" + eff[y] + \".csv\"), sep=',', index=False)\n off_gas.to_csv((csv_path + \"off_gas_comp_\" + eff[y] + \".csv\"), sep=',', index=False)\n solid_trap.to_csv((csv_path + \"solid_trap_comp_\" + eff[y] + \".csv\"), sep=',', index=False)\n\n # Creates directory for each case to store plots\n text_path = (\"./\" + reactor + \"/OPUS/off_eff_\" + eff[y] + \"/\") \n if os.path.exists(text_path):\n shutil.rmtree(text_path)\n os.makedirs(text_path)\n \n # Plots every isotope in the off-gas system vs fuel_salt\n if plot_all == \"Y\":\n isotopes = list(off_gas.columns)\n isotopes.pop(0)\n else:\n print(\"Plotting supplied isotopes: {}\".format(isotopes))\n \n num_isotopes = len(isotopes)\n #num_isotopes = 10 # For Testing\n print(\"There are {} isotopes in the off-gas mixture.\".format(num_isotopes))\n print(\"Plotting 
data...\")\n \n \n for i in range(0, num_isotopes): \n isotope = isotopes[i]\n\n fig, ax1 = plt.subplots()\n\n color = 'tab:red'\n ax1.set_xlabel('Time (days)')\n ax1.set_ylabel('Fuel Salt (grams)', color=color)\n ax1.set_title(isotope)\n ax1.plot(fuel_salt['time'], fuel_salt[isotope], color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n ax1.grid()\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\n color = 'tab:blue'\n ax2.set_ylabel('Off-gas System (grams)', color=color) # we already handled the x-label with ax1\n ax2.plot(off_gas['time'], off_gas[isotope], color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n ax2.grid()\n\n fig.tight_layout() # otherwise the right y-label is slightly clipped\n #plt.savefig(\"./Images/\" + isotope + \"_offgas.png\")\n #plt.savefig((\"./\" + reactor + \"/OPUS/\" + isotope + \"_offgas.png\")) #ADD REACTOR PATH\n plt.savefig((\"./\" + reactor + \"/OPUS/off_eff_\" + eff[y] + \"/\" + isotope + \"_offgas.png\"), \n format='png', dpi=300)\n plt.close()\n \n # Plots every isotope in the solid trap system vs fuel_salt\n if plot_solid == \"Y\":\n if plot_all == \"Y\":\n isotopes = list(solid_trap.columns)\n isotopes.pop(0)\n else:\n print(\"Plotting supplied isotopes: {}\".format(isotopes))\n\n num_isotopes = len(isotopes) \n #num_isotopes = 10\n print(\"There are {} isotopes in the solid trap mixture.\".format(num_isotopes))\n print(\"Plotting data...\")\n\n for i in range(0, num_isotopes): \n isotope = isotopes[i]\n\n fig, ax1 = plt.subplots()\n\n color = 'tab:red'\n ax1.set_xlabel('Time (days)')\n ax1.set_ylabel('Fuel Salt (grams)', color=color)\n ax1.set_title(isotope)\n ax1.plot(fuel_salt['time'], fuel_salt[isotope], color=color)\n ax1.tick_params(axis='y', labelcolor=color)\n ax1.grid()\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\n color = 'tab:blue'\n ax2.set_ylabel('Solid Trap (grams)', color=color) # we already handled the x-label with ax1\n ax2.plot(solid_trap['time'], solid_trap[isotope], color=color)\n ax2.tick_params(axis='y', labelcolor=color)\n ax2.grid()\n\n fig.tight_layout() # otherwise the right y-label is slightly clipped\n #plt.savefig(\"./Images/\" + isotope + \"_solid.png\")\n #plt.savefig((\"./\" + reactor + \"/OPUS/\" + isotope + \"_solid.png\"))\n plt.savefig((\"./\" + reactor + \"/OPUS/off_eff_\" + eff[y] + \"/\" + isotope + \"_solid.png\"), \n format='png', dpi=300)\n plt.close()\n else:\n print(\"Solid Trap Isotopes were not plotted...\")\n\n print(\"Finish Plotting all OPUS files.\")\n \n x = x + 3\n y = y + 1\n\nfuel_salt_0 = pd.read_csv((\"./\" + reactor + \"/OPUS/csv/\" + \"salt_comp_\" + eff[0] + \".csv\"))\nfuel_salt_5 = pd.read_csv((\"./\" + reactor + \"/OPUS/csv/\" + \"salt_comp_\" + eff[1] + \".csv\"))\nfuel_salt_100 = pd.read_csv((\"./\" + reactor + \"/OPUS/csv/\" + \"salt_comp_\" + eff[-1] + \".csv\"))\n\nisotopes_interest = [\"xe135\", \"kr86\"]\n\nfor i in isotopes_interest:\n fig, ax1 = plt.subplots()\n ax1.title.set_text('Off-gas Efficiency Impact on ' + i + ' Inventory')\n ax1.grid(True)\n\n color = 'tab:red'\n ax1.set_xlabel('Time (days)', fontsize=14)\n ax1.set_ylabel('Grams', color=color, fontsize=14)\n ax1.plot(fuel_salt_0[\"time\"], fuel_salt_0[i], color=color, label='Off-Gas Efficiency: 0%')\n ax1.tick_params(axis='y', labelcolor=color)\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\n color = 'tab:blue'\n ax2.set_ylabel('Grams', color=color, fontsize=14) # we already handled the x-label with ax1\n 
ax2.plot(fuel_salt_100[\"time\"], fuel_salt_100[i], color=color, label='Off-gas Efficiency: 100%')\n ax2.tick_params(axis='y', labelcolor=color)\n\n h1, l1 = ax1.get_legend_handles_labels()\n h2, l2 = ax2.get_legend_handles_labels()\n ax1.legend(h1+h2, l1+l2, loc=\"lower right\")\n\n fig.tight_layout() # otherwise the right y-label is slightly clipped\n plt.savefig((\"./\" + reactor + \"/OPUS/\" + i + \"_100_0_impact.png\"), \n format='png', dpi=500)\n #plt.show()\n \nfor i in isotopes_interest:\n fig, ax1 = plt.subplots()\n ax1.title.set_text('Off-gas Efficiency Impact on ' + i + ' Inventory')\n ax1.grid(True)\n\n color = 'tab:red'\n ax1.set_xlabel('Time (days)')\n ax1.set_ylabel('Grams', color=color)\n ax1.plot(fuel_salt_5[\"time\"], fuel_salt_5[i], color=color, label='Off-Gas Efficiency: 5%')\n ax1.tick_params(axis='y', labelcolor=color)\n\n ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\n\n color = 'tab:blue'\n ax2.set_ylabel('Grams', color=color) # we already handled the x-label with ax1\n ax2.plot(fuel_salt_100[\"time\"], fuel_salt_100[i], color=color, label='Off-gas Efficiency: 100%')\n ax2.tick_params(axis='y', labelcolor=color)\n\n h1, l1 = ax1.get_legend_handles_labels()\n h2, l2 = ax2.get_legend_handles_labels()\n ax1.legend(h1+h2, l1+l2, loc=\"lower right\")\n\n fig.tight_layout() # otherwise the right y-label is slightly clipped\n plt.savefig((\"./\" + reactor + \"/OPUS/\" + i + \"_100_5_impact.png\"), \n format='png', dpi=500)\n #plt.show()","sub_path":"SCALE_beta_parametric/opus.py","file_name":"opus.py","file_ext":"py","file_size_in_byte":10476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"503929876","text":"from orator import Model, DatabaseManager\nfrom time import sleep\nconfig = {\n 'mysql': {\n 'driver': 'mysql',\n 'host': 'localhost',\n 'database': 'biblioteca',\n 'user': 'root',\n 'password': 'admin',\n 'prefix': ''\n }\n}\ndb = DatabaseManager(config)\nModel.set_connection_resolver(db)\n\ndef AddAutor():\n Nombre = input(\"Ingresar Nombre del Autor:\\n\")\n Correo = input(\"Ingresar Correo del Autor:\\n\")\n res = db.table(\"Autor\").insert({\n 'Nombre_Autor' : Nombre,\n 'Correo' : Correo\n })\n if res:\n print(\"Se ah insertado correctamente.\")\n sleep(2)\n else:\n print(\"Hubo un problema al insertar el dato.\\a\")\n sleep(2)\n\ndef ListAllAutor():\n res = db.table(\"Autor\").get()\n print(\"Id\",\"Nombre Autor\", \"Correo\", sep=\"\\t\")\n for row in res:\n print(row.idAutor, row.Nombre_Autor, row.Correo, sep=\"\\t\")\n sleep(2)\n \ndef UpdateAutor():\n ListAllAutor()\n idAutor = input(\"Ingresar Id del Autor:\\n\")\n Nombre = input(\"Ingresar nuevo Nombre del Autor:\\n\")\n Correo = input(\"Ingresar nuevo Correo del Autor:\\n\")\n resup = db.table(\"Autor\").where(\"idAutor\", idAutor).update({\n 'Nombre_Autor' : Nombre,\n 'Correo' : Correo \n })\n if resup:\n print(\"Se ah modificado al autor\")\n sleep(2)\n else:\n print(\"No fue posible modificar al autor\")\n sleep(2)\n","sub_path":"Semana8Hackaton/Bryan Arias/App/Controller/AutorController.py","file_name":"AutorController.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"58490177","text":"#!/usr/bin/python\n\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\nnumbersPerNeuron = 1\nnumbersPerConnection = 3\n\nymin = -1.2\nymax = 1.2\nxmin = -1.2\nxmax = 1.2\nscale = 2\nwidth = (xmax - 
xmin)*scale\nheight = (ymax - ymin)*scale\nxmargin = (xmax - xmin - 1)/8\n\ninhibColor = [1,0,0]\nexcitColor = [0,0,1]\n\narrowDist = 0.15\nneuronRadius = 0.1\narrowWidth = 0.03\narrowLength = 0.05\n\nclass NetworkParser:\n def __init__(self, split_line):\n self.current_index = 0\n self.split_line = split_line\n\n def next_number(self):\n i = self.current_index\n self.current_index += 1\n return self.split_line[i]\n\n\ndef readFile(filename):\n neurons = []\n connections = []\n\n inputFile = open(filename, 'r')\n\n for line in inputFile:\n line = line.strip()\n numbers = line.split()\n\n # use a distinct name so the parser does not shadow numpy's \"np\" alias\n parser = NetworkParser(numbers)\n\n nbOfNeurons = int(parser.next_number())\n nbOfConnections = int(parser.next_number())\n\n for i in range(nbOfNeurons):\n neuron = []\n for _ in range(numbersPerNeuron):\n neuron.append(float(parser.next_number()))\n neurons.append(neuron)\n\n for i in range(nbOfConnections):\n connection = []\n connection.append(int(parser.next_number()))\n connection.append(int(parser.next_number()))\n connection.append(float(parser.next_number()))\n connections.append(connection)\n\n inputFile.close()\n\n return neurons, connections\n\ndef plotNetwork(neurons, connections):\n ax = plt.axes()\n ax.axes.xaxis.set_visible(False)\n ax.axes.yaxis.set_visible(False)\n\n positions = [(0,0)] * len(neurons)\n angle = 0.0\n angleUpdate = 2 * np.pi / len(neurons)\n\n for neuronIndex in range(len(neurons)):\n neuron = neurons[neuronIndex]\n bias = neuron[0]\n x = np.sin(angle)\n y = np.cos(angle)\n angle = angle + angleUpdate\n positions[neuronIndex] = (x,y)\n\n #Draw the neuron outline\n circ = plt.Circle((x,y), radius=neuronRadius, facecolor=[1,1,1], edgecolor=[0,0,0], fill=True, linewidth=1, zorder=2)\n ax.add_patch(circ)\n\n #Determine color based on bias\n if bias < 0.0:\n currentColor = inhibColor\n elif bias > 0.0:\n currentColor = excitColor\n else: continue\n\n #Determine size based on bias\n w = ((0.8*abs(bias))+1)*0.05\n\n #Draw the bias\n if w > 0.05:\n circ = plt.Circle((x,y), radius=w, color=currentColor, fill=True, zorder=3)\n ax.add_patch(circ)\n\n\n for connectionIndex in range(len(connections)):\n connection = connections[connectionIndex]\n source, target, weight = connection\n x1, y1 = positions[source]\n x2, y2 = positions[target]\n\n #Determine color based on weight\n if weight < 0.0:\n currentColor = inhibColor\n elif weight > 0.0:\n currentColor = excitColor\n else: continue\n\n #Determine line weight based on weight\n w = (2*abs(weight))+0.2\n\n #If the target equals the source, draw a circle; otherwise draw an arrow\n if source == target:\n x_offset = neuronRadius\n y_offset = neuronRadius\n if x1 < 0: x_offset *= -1\n if y1 < 0: y_offset *= -1\n circ = plt.Circle((x1+x_offset, y1+y_offset), radius=neuronRadius, color=currentColor, fill=False, linewidth=w, zorder=1)\n ax.add_patch(circ)\n else:\n xdiff = x2 - x1\n ydiff = y2 - y1\n length = math.sqrt(xdiff*xdiff + ydiff*ydiff)\n xratio = xdiff/length\n yratio = ydiff/length\n xdiff -= xratio*arrowDist\n ydiff -= yratio*arrowDist\n arr = plt.arrow(x1, y1, xdiff, ydiff, head_width=arrowWidth, head_length=arrowLength, color=currentColor, linewidth=w, zorder=1)\n ax.add_patch(arr)\n\n\n\ni=0\nfor arg in sys.argv[1:]:\n plt.figure(i, figsize=(width,height))\n neurons, connections = readFile(arg)\n plotNetwork(neurons, connections)\n plt.ylim([ymin, ymax])\n plt.xlim([xmin, xmax])\n plt.savefig(arg + \".png\")\n plt.clf()\n 
i+=1\n","sub_path":"plotNetworkCircle.py","file_name":"plotNetworkCircle.py","file_ext":"py","file_size_in_byte":3968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"105502576","text":"\"\"\"Models of the test app.\"\"\"\n\nfrom django.db import models\n\n\nclass Image(models.Model):\n \"\"\"\n Model for images.\n\n All images will be placed in the dynamic directory based on a current year,\n separate from notes and quotes.\n \"\"\"\n\n image = models.ImageField(\n upload_to='%Y/images',\n blank=True,\n null=True\n )\n\n\nclass Note(models.Model):\n \"\"\"\n Model for notes.\n\n All notes will be placed in the same directory as quotes, separate from\n images.\n \"\"\"\n\n note = models.FileField(\n upload_to='uploads',\n blank=True,\n null=True\n )\n\n\nclass Quote(models.Model):\n \"\"\"\n Model for quotes.\n\n All quotes will be placed in the same directory as notes, separate from\n images.\n \"\"\"\n\n quote = models.FileField(\n upload_to='uploads',\n blank=True,\n null=True\n )\n","sub_path":"django_stainless/tests/app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"407547075","text":"# generic views (from rest_framework)\n#http://www.django-rest-framework.org/api-guide/generic-views/\nfrom django.db.models import Q\nfrom rest_framework import generics, mixins\nfrom postings.models import BlogPost\n\nfrom .serializers import BlogPostSerializer\n\n# add CreateModelMixin to allow POSTing new objects\nclass BlogPostAPIView(mixins.CreateModelMixin, generics.ListAPIView):\n\tlookup_field = 'pk' #pk by default, could be slug or id\n\n\tserializer_class = BlogPostSerializer\n\n\tqueryset = BlogPost.objects.all()\n\n\tdef get_queryset(self): # we have to override this\n\t\tqs = BlogPost.objects.all()\n\t\tquery = self.request.GET.get(\"q\")\n\n\t\tif query is not None:\n\t\t\tqs = qs.filter(Q(title__icontains=query)|Q(content__icontains=query)).distinct() # Q objects let us OR the filters\n\t\treturn qs\n\n\tdef perform_create(self, serializer):\n\t\tserializer.save(user=self.request.user)\n\n\tdef post(self, request, *args, **kwargs):\n\t\treturn self.create(request, *args, **kwargs)\n\nclass BlogPostRUDView(generics.RetrieveUpdateDestroyAPIView):\n\tlookup_field = 'pk' #pk by default, could be slug or id\n\n\tserializer_class = BlogPostSerializer\n\n\tqueryset = BlogPost.objects.all()\n\n\tdef get_queryset(self): # we have to override this\n\t\treturn BlogPost.objects.all()\n\n\t#commented this out since this is what lookup_field basically does\n\t# def get_object():\n\t# \tpk = self.kwargs.get(\"pk\")\n\t# \treturn BlogPost.objects.get(pk=pk)\n\n\n","sub_path":"django/rest_api/postings/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"271026920","text":"# -*- coding: utf-8 -*-\n__author__ = 'liuyuxiao'\n\n\nimport json\nfrom rest_framework import status\nfrom aws.utilities.push_message import api_view\n#from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\n\n\n\n\nheader = dict()\nheader['CharSet'] = 'UTF-8'\n\n\n\n@api_view(['GET'])\ndef basicpersoninfo(request):\n if request.method == 'GET':\n form_array = []\n # surname\n firstname_dict = textinput('firstname','horizontalBootstrapLabelInMultiSmall','姓',True,'姓',3)\n form_array.append(firstname_dict)\n\n 
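# each helper below builds one angular-formly style field dict (key/type/templateOptions); fields render in append order\n 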
# given name\n lastname_dict = textinput('lastname','horizontalBootstrapLabelInMultiSmall','名',True,'名',3)\n form_array.append(lastname_dict)\n\n\n englishname_dict = textinput('englishname','horizontalBootstrapLabelInMultiSmall','英文名字',False,'英文名字',5)\n form_array.append(englishname_dict)\n\n\n option_array = []\n option_array.append({'name':'男','value':'男'})\n option_array.append({'name':'女','value':'女'})\n gender_button = radio_button('sex','性别',True,option_array)\n form_array.append(gender_button)\n\n #birthday\n birthday_selected = dayselected('出生日期','birthday_year','birthday_month','birthday_day')\n form_array.append(birthday_selected)\n\n #grade\n grade_array = []\n grade_array.append({'name':'初一及以前','value':'初一及以前'})\n grade_array.append({'name':'初二','value':'初二'})\n grade_array.append({'name':'初三','value':'初三'})\n\n grade_selected = multiselected('current_grade','当前年级',True,grade_array,'初三')\n form_array.append(grade_selected)\n\n\n #race\n race_array = []\n race_array.append({'name':'亚洲人','value':'亚洲人'})\n race_array.append({'name':'白人','value':'白人'})\n race_array.append({'name':'黑人','value':'黑人'})\n race_array.append({'name':'拉美人','value':'拉美人'})\n\n\n\n race_selected = multiselected('race','种族',True,race_array,'亚洲人')\n form_array.append(race_selected)\n\n #language\n language_array = []\n language_array.append({'name':'英语','value':'英语'})\n language_array.append({'name':'汉语','value':'汉语'})\n\n\n language_selected = multiselected('language','母语',True,language_array,'汉语')\n form_array.append(language_selected)\n\n\n #Mobile\n mobile_dict = textinput('mobile','horizontalInput','手机号',True,'手机号',10)\n form_array.append(mobile_dict)\n\n #email\n email_dict = textinput('email','horizontalInput','邮箱地址',True,'邮箱地址',10)\n form_array.append(email_dict)\n\n #birth_country\n country_array = country_array_return()\n birthcountry_selected = multiselected('birthcountry','出生国家',True,country_array,'中国大陆')\n form_array.append(birthcountry_selected)\n\n #national_country\n national_selected = multiselected('national_country','现有国籍',True,country_array,'中国大陆')\n form_array.append(national_selected)\n\n #home_address\n homeaddress_input = textinput('homeaddress','horizontalInput','家庭住址',True,'家庭住址',20)\n form_array.append(homeaddress_input)\n\n #living_country\n living_selected = multiselected('livingcountry','居住国家',True,country_array,'中国大陆')\n form_array.append(living_selected)\n\n #postcode\n postcode_input = textinput('postcode','horizontalInput','住址邮编',False,'邮编',6)\n form_array.append(postcode_input)\n\n result = dict()\n placeholder = dict()\n result['form'] = form_array\n placeholder['firstname'] = u'严'\n placeholder['lastname'] = u'海飚'\n placeholder['birthday_year'] = '1995'\n placeholder['birthday_month'] = '1'\n placeholder['birthday_day'] = '1'\n placeholder['current_grade'] = u'初三'\n placeholder['language'] = u'汉语'\n placeholder['race'] = u'亚洲人'\n placeholder['birthcountry'] = u'中国大陆'\n placeholder['national_country'] = u'中国大陆'\n placeholder['livingcountry'] = u'中国大陆'\n\n\n result['placeholder'] = placeholder\n\n return Response({'result': result}, headers=header, status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\ndef family_info(request):\n if request.method == 'GET':\n form_array = []\n\n # family relationship\n relationship_array = []\n relationship_array.append({'name':'父亲','value':'父亲'})\n relationship_array.append({'name':'母亲','value':'母亲'})\n relationship_array.append({'name':'其他监护人','value':'其他监护人'})\n relation_selected = multiselected('relationship','关系',True,relationship_array,'父亲')\n 
form_array.append(relation_selected)\n\n # mobile phone\n mobile_input = textinput('mobile','horizontalInput','手机号',True,'手机号',10)\n form_array.append(mobile_input)\n\n # email\n email_input = textinput('email','horizontalInput','邮箱',True,'邮箱',10)\n form_array.append(email_input)\n\n # surname\n firstname_dict = textinput('firstname','horizontalBootstrapLabelInMultiSmall','姓',True,'姓',3)\n form_array.append(firstname_dict)\n\n # given name\n lastname_dict = textinput('lastname','horizontalBootstrapLabelInMultiSmall','名',True,'名',3)\n form_array.append(lastname_dict)\n\n # job title\n position = textinput('position','horizontalBootstrapLabelInMultiSmall','职位头衔',True,'职位头衔',3)\n form_array.append(position)\n\n # highest education level\n xueli_array = []\n xueli_array.append({'name':'初中','value':'初中'})\n xueli_array.append({'name':'高中','value':'高中'})\n xueli_array.append({'name':'本科','value':'本科'})\n xueli_array.append({'name':'研究生','value':'研究生'})\n xueli_array.append({'name':'博士生','value':'博士生'})\n xueli_selected = multiselected('xueli','最高学历',True,xueli_array,'最高学历')\n form_array.append(xueli_selected)\n\n # high school\n highschool_input = textinput('highschool','horizontalInput','高中',True,'高中',10)\n form_array.append(highschool_input)\n\n\n # university\n college_input = textinput('college','horizontalInput','大学',True,'大学',10)\n form_array.append(college_input)\n\n # current company\n company_input = textinput('company','horizontalInput','所在公司',True,'公司',10)\n form_array.append(company_input)\n\n return Response({'result':form_array},headers=header,status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\ndef education_info(request):\n if request.method == 'GET':\n form_array = []\n\n #current_school\n school_input = textinput('current_school','horizontalInput','当前学校名称',True,'当前学校名称',10)\n form_array.append(school_input)\n\n enter_school_selected = dayselected('入学日期','enterschool_year','enterschool_month','enterschool_day')\n form_array.append(enter_school_selected)\n\n #school_days\n\n return Response({'result':form_array},headers=header,status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\ndef application_general_info(request):\n if request.method == 'GET':\n\n form_array = []\n # class rank\n class_rank = []\n class_rank.append({'name':'前1%','value':'前1%'})\n class_rank.append({'name':'前5%','value':'前5%'})\n class_rank.append({'name':'前10%','value':'前10%'})\n class_rank.append({'name':'前25%','value':'前25%'})\n class_rank.append({'name':'前50%','value':'前50%'})\n class_rank.append({'name':'前75%','value':'前75%'})\n\n class_rank_selected = multiselected('class_rank','班级排名',True,class_rank,'班级排名')\n form_array.append(class_rank_selected)\n\n # class size\n class_size = []\n class_size.append({'name':'20人以下','value':'20人以下'})\n class_size.append({'name':'20人-50人','value':'20人-50人'})\n class_size.append({'name':'50以上','value':'50以上'})\n\n class_size_selected = multiselected('class_size','班级大小',True,class_size,'班级大小')\n form_array.append(class_size_selected)\n\n return Response({'result':form_array},headers=header,status=status.HTTP_200_OK)\n\ndef textinput(key,type,label,required,placeholder,width):\n form_dict1 = dict()\n form_dict1['key'] = key\n form_dict1['type'] = type\n templateOptions1 = dict()\n templateOptions1['label'] = label\n templateOptions1['required'] = required\n templateOptions1['placeholder'] = placeholder\n templateOptions1['width'] = width\n form_dict1['templateOptions'] = templateOptions1\n return form_dict1\n\n\ndef radio_button(key,label,required,option_array):\n form_dict4 = dict()\n form_dict4['key'] = key\n form_dict4['type'] = 'horizontalRadio'\n templateOptions4 = 
dict()\n templateOptions4['label'] = label\n templateOptions4['required'] = required\n templateOptions4['options'] = option_array\n form_dict4['templateOptions'] = templateOptions4\n return form_dict4\n\n\ndef multiselected(key,label,required,name_value_array,placeholder):\n form_dict5 = dict()\n form_dict5['key'] = key\n form_dict5['type'] = 'horizontalBootstrapSelectInMulti'\n templateOptions5 = dict()\n templateOptions5['label'] = label\n templateOptions5['required'] = required\n templateOptions5['options'] = name_value_array\n templateOptions5['placeholder'] = placeholder\n form_dict5['templateOptions'] = templateOptions5\n return form_dict5\n\n\n\ndef dayselected(label,firstkey,secondkey,thirdkey):\n # generic year/month/day selector (keys are parameterized)\n form_dict5 = dict()\n form_dict5['type'] = 'multiField'\n templateOptions5 = dict()\n\n fields_array = []\n year_dict = dict()\n year_dict['type'] = 'horizontalBootstrapSelectInMulti'\n year_dict['key'] = firstkey\n templateOptions1 = dict()\n templateOptions1['label'] = label\n templateOptions1['required'] = True\n option_array1 = []\n for i in range(1980,2005):\n option_array1.append({'name':str(i)+'年','value':str(i)})\n templateOptions1['options'] = option_array1\n templateOptions1['width'] = 6\n year_dict['templateOptions'] = templateOptions1\n fields_array.append(year_dict)\n\n month_dict = dict()\n month_dict['type'] = 'horizontalBootstrapSelectInMultiNoLabel'\n month_dict['key'] = secondkey\n templateOptions2 = dict()\n templateOptions2['required'] = True\n option_array2 = []\n for i in range(1,13): # months 1-12\n option_array2.append({'name':str(i)+'月','value':str(i)})\n templateOptions2['options'] = option_array2\n templateOptions2['width'] = 2\n month_dict['templateOptions'] = templateOptions2\n month_dict['expressionProperties'] = {'templateOptions.disabled':'!model.'+firstkey}\n fields_array.append(month_dict)\n\n day_dict = dict()\n day_dict['type'] = 'horizontalBootstrapSelectInMultiNoLabel'\n day_dict['key'] = thirdkey\n templateOptions3 = dict()\n templateOptions3['required'] = True\n option_array3 = []\n for i in range(1,32): # days 1-31\n option_array3.append({'name':str(i)+'日','value':str(i)})\n templateOptions3['options'] = option_array3\n templateOptions3['width'] = 2\n day_dict['templateOptions'] = templateOptions3\n day_dict['expressionProperties'] = {'templateOptions.disabled':'!model.'+firstkey}\n fields_array.append(day_dict)\n\n templateOptions5['fields'] = fields_array\n form_dict5['templateOptions'] = templateOptions5\n\n return form_dict5\n\n\n\ndef country_array_return():\n country_array = []\n country_array.append({'name':'中国大陆','value':'中国大陆'})\n country_array.append({'name':'中国香港','value':'中国香港'})\n country_array.append({'name':'中国澳门','value':'中国澳门'})\n country_array.append({'name':'中国台湾','value':'中国台湾'})\n country_array.append({'name':'美国','value':'美国'})\n country_array.append({'name':'加拿大','value':'加拿大'})\n country_array.append({'name':'英国','value':'英国'})\n return country_array\n\n\n\n\n\n\n\n\n\n","sub_path":"aws/views/personform.py","file_name":"personform.py","file_ext":"py","file_size_in_byte":12356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"243108821","text":"import json\nimport requests\nimport webbrowser\nimport re\n\nfrom bots import talk_bot\n\ndef get_location():\n talk_bot.talk(\"Getting Location ... 
\")\n send_url = 'http://freegeoip.net/json'\n r = requests.get(send_url)\n location = json.loads(r.text)\n return location\n\ndef locate_me():\n\tcity = get_location()['city']\n\ttalk_bot.talk(\"You are at \" + city)\n\ndef directions(command):\n\treg_ex = re.search('from (.*) to (.*)', command)\n\tif reg_ex:\n\t\tfrom_city = reg_ex.group(1)\n\t\tto_city = reg_ex.group(2)\n\t\ttalk_bot.talk('Opening up directions from ' + from_city + 'to ' + to_city)\n\t\turl = \"https://www.google.com/maps/dir/{0}/{1}\".format(from_city, to_city)\n\t\twebbrowser.open(url)\n\telse:\n\t\ttalk_bot.talk('Check the command format, opening up google maps to enter manually')\n\t\turl = \"https://www.google.com/maps/place/{0}\".format(get_location()['city'])\n\t\twebbrowser.open(url)","sub_path":"bots/location_bot.py","file_name":"location_bot.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"214325276","text":"import json\nimport numpy as np\nfrom scipy.ndimage.filters import gaussian_filter1d\nimport keras.backend as K\nfrom keras import metrics\nfrom keras.utils import to_categorical\nfrom keras.callbacks import *\nfrom keras.losses import *\nfrom keras.optimizers import *\nfrom matplotlib import pyplot as plt\n\nfrom training_data.data_getter import DataGetter, QUESTION_ANSWERS, QUESTIONS\nfrom models.ff2.a_smartphone_only.create_2a import model as smartphone_only_model\nfrom models.ff2.a_smartphone_only.create_2a import model_flat as smartphone_only_model_flat\nfrom models.ff2.show_off.show_off import model as show_off_model\nfrom models.ff2.c_full_pc.create_2c import model as full_pc_model\nfrom models.ff2.d_future_a.create_2d import model as future_a_model\n\n\n# a, b, c, d: start with training with other_users_training_data\n# a, c, d: then train with observed_user_training_data\n\n\ndef num_correct_pred_no_cat(y_true, y_pred):\n return K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n\n\ndef num_correct_pred(y_true, y_pred):\n return K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n\n\ndef train():\n data_getter = DataGetter()\n (val_x, val_y), (observed_x, observed_y), (others_x, others_y) = data_getter.generate_training_data()\n\n val_y_flat = val_y.reshape((-1, QUESTION_ANSWERS * QUESTIONS))\n observed_y_flat = observed_y.reshape((-1, QUESTION_ANSWERS * QUESTIONS))\n others_y_flat = others_y.reshape((-1, QUESTION_ANSWERS * QUESTIONS))\n\n val_y_no_cats = np.argmax(val_y, axis=-1)\n observed_y_no_cats = np.argmax(observed_y, axis=-1) # its not possible, to use categorized Data with model_a\n others_y_no_cats = np.argmax(others_y, axis=-1)\n\n combined_x = np.concatenate((observed_x, others_x))\n combined_y = np.concatenate((observed_y, others_y))\n combined_y_no_cats = np.concatenate((observed_y_no_cats, others_y_no_cats))\n combined_y_flat = np.concatenate((observed_y_flat, others_y_flat))\n\n com_with_val_x = np.concatenate((observed_x, others_x, val_x))\n com_with_val_y = np.concatenate((observed_y, others_y, val_y))\n\n print(\"Combined x: \", np.shape(combined_x))\n print(\"Combined y: \", np.shape(combined_y))\n print(\"Combined y no cats: \", np.shape(combined_y_no_cats))\n\n smartphone_only_model.compile(loss=mean_squared_error, optimizer=Adam())\n smartphone_only_model_flat.compile(loss=mean_squared_error, optimizer=Adam())\n show_off_model.compile(loss=categorical_crossentropy, optimizer=Adam())\n\n # define callbacks\n so_callback_results = []\n so_flat_callback_results = []\n 
show_off_callback_results = []\n\n def compare_so(epoch, logs):\n predictions = smartphone_only_model.predict(val_x)\n diff = np.abs((predictions - val_y_no_cats).flatten())\n diff = diff[np.where(diff <= 0.5)]\n percentage = diff.size / val_y_no_cats.size\n so_callback_results.append(percentage)\n\n def compare_so_flat(epoch, logs):\n predictions = smartphone_only_model_flat.predict(val_x)\n predictions = np.argmax(predictions.reshape((-1, 4)), axis=-1)\n diff = np.abs(predictions - val_y_no_cats.flatten())\n diff = diff[np.where(diff <= 0.5)]\n percentage = diff.size / val_y_no_cats.size\n so_flat_callback_results.append(percentage)\n\n def compare_show_off(epoch, logs):\n predictions = show_off_model.predict(val_x)\n predictions = np.argmax(predictions, axis=-1).flatten()\n vals = np.argmax(val_y, axis=-1).flatten()\n diff = np.abs(predictions - vals)\n diff = diff[np.where(diff <= 0.5)]\n percentage = diff.size / val_y_no_cats.size\n show_off_callback_results.append(percentage)\n\n so_callback = LambdaCallback(on_epoch_end=compare_so)\n so_flat_callback = LambdaCallback(on_epoch_end=compare_so_flat)\n show_off_callback = LambdaCallback(on_epoch_end=compare_show_off)\n\n # Pre training stage with others user data\n pre_training_epochs = 100\n\n hist_a = smartphone_only_model.fit([combined_x],\n combined_y_no_cats,\n epochs=pre_training_epochs,\n validation_data=[val_x, val_y_no_cats],\n callbacks=[so_callback])\n\n hist_a_flat = smartphone_only_model_flat.fit([combined_x],\n combined_y_flat,\n epochs=pre_training_epochs,\n validation_data=[val_x, val_y_flat],\n callbacks=[so_flat_callback])\n\n hist_b = show_off_model.fit([com_with_val_x],\n com_with_val_y,\n epochs=pre_training_epochs,\n validation_data=[val_x, val_y],\n callbacks=[show_off_callback])\n # full_pc_model.fit([others_x], others_y, epochs=1, validation_data=[val_x, val_y])\n # future_a_model.fit([others_x], others_y, epochs=1, validation_data=[val_x, val_y])\n\n # TODO: Plot Data\n hist_a_data = hist_a.history[\"loss\"]\n hist_a_val_data = hist_a.history[\"val_loss\"]\n\n hist_a_data_flat = hist_a_flat.history[\"loss\"]\n hist_a_val_data_flat = hist_a_flat.history[\"val_loss\"]\n\n hist_b_data = hist_b.history[\"loss\"]\n hist_b_val_data = hist_b.history[\"val_loss\"]\n\n with open(\"stored_train4.json\", \"w\") as store:\n json.dump({\n \"hist_a\": hist_a_data,\n \"hist_a_val\": hist_a_val_data,\n \"so_results\": so_callback_results,\n \"hist_a_flat\": hist_a_data_flat,\n \"hist_a_val_flat\": hist_a_val_data_flat,\n \"so_flat_results\": so_flat_callback_results,\n \"hist_b\": hist_b_data,\n \"hist_b_val\": hist_b_val_data,\n \"show_off_results\": show_off_callback_results\n }, store)\n\n\ndef show():\n with open(\"stored_train4.json\") as store:\n hists = json.load(store)\n hist_a_data = hists[\"hist_a\"]\n hist_a_val_data = hists[\"hist_a_val\"]\n so_results = hists[\"so_results\"]\n hist_a_data_flat = hists[\"hist_a_flat\"]\n hist_a_val_data_flat = hists[\"hist_a_val_flat\"]\n so_flat_results = hists[\"so_flat_results\"]\n hist_b_data = hists[\"hist_b\"]\n hist_b_val_data = hists[\"hist_b_val\"]\n show_off_results = hists[\"show_off_results\"]\n\n \"\"\"\n # plt.plot(hist_a_data, label=\"Trainingsdaten\")\n plt.plot(hist_a_val_data, label=\"Validierungsdaten\")\n plt.xlabel(\"Epochen\")\n plt.ylabel(\"Fehlerwert\")\n plt.title(\"Fehlerwerte des Smartphone Only Model\")\n plt.legend()\n plt.show()\n\n # plt.plot(hist_b_data, label=\"Trainingsdaten\")\n plt.plot(hist_b_val_data, label=\"Validierungsdaten\")\n 
plt.xlabel(\"Epochen\")\n plt.ylabel(\"Fehlerwert\")\n plt.title(\"Fehlerwerte des Beispielnetzes\")\n plt.legend()\n plt.show()\n \"\"\"\n until = 200\n\n hist_a_val_data_smooth = gaussian_filter1d(hist_a_val_data, sigma=1)[:until]\n hist_a_val_data_flat_smooth = gaussian_filter1d(hist_a_val_data_flat, sigma=1)[:until]\n hist_b_val_data_smooth = gaussian_filter1d(hist_b_val_data, sigma=1)[:until]\n\n for idx, res in enumerate(so_results):\n so_results[idx] *= 100\n so_flat_results[idx] *= 100\n show_off_results[idx] *= 100\n\n plt.title(\"Fehlerwerte auf Validierungsdaten\")\n \"\"\"\n plt.subplot(3, 2, 1)\n plt.plot(hist_a_val_data[:until], color=\"red\")\n plt.ylim(0)\n \"\"\"\n plt.subplot(3, 1, 1)\n plt.plot(so_results[:until], color=\"red\")\n plt.ylabel(\"Gerundet\")\n plt.ylim(0, 100)\n \"\"\"\n plt.subplot(3, 2, 3)\n plt.plot(hist_a_val_data_flat[:until], color=\"red\")\n plt.ylim(0)\n \"\"\"\n plt.subplot(3, 1, 2)\n plt.plot(so_flat_results[:until], color=\"red\")\n plt.ylabel(\"Geplättet\")\n plt.ylim(0, 100)\n \"\"\"\n plt.subplot(3, 2, 5)\n plt.plot(hist_b_val_data[:until], color=\"darkblue\")\n plt.ylim(0)\n \"\"\"\n plt.subplot(3, 1, 3)\n plt.plot(show_off_results[:until], color=\"darkblue\")\n plt.ylabel(\"2D Kategorisierung\")\n plt.ylim(0, 100)\n\n plt.xlabel(\"Epochen\")\n plt.show()\n\n\ntrain()\nshow()\n","sub_path":"models/ff2/train_models.py","file_name":"train_models.py","file_ext":"py","file_size_in_byte":8182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"123647293","text":"import os\nimport h5py\nimport torch\nimport copy\n\nfrom random import shuffle\nfrom tensorboardX import SummaryWriter\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, DataLoader\nfrom sklearn.metrics import precision_recall_curve\nfrom scipy.ndimage.interpolation import map_coordinates\nfrom scipy.interpolate import RectBivariateSpline\nfrom scipy import interpolate\n\nimport skimage as sk\nimport pickle as pkl\nimport skimage.morphology\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nfrom .utils import pandas_hdf5_handler,kymo_handle\n\n#y,x,t -> k*t,1,y,x\n\nfrom matplotlib import pyplot as plt\n\nclass data_augmentation:\n def __init__(self,p_flip=0.5,max_rot=15,min_padding=20):\n self.p_flip = p_flip\n self.max_rot = max_rot\n self.min_padding = min_padding\n \n# def make_chunked_kymograph(self,img_arr,chunksize=10):\n# pad = (chunksize - (img_arr.shape[2]%chunksize))*img_arr.shape[1]\n# chunked_arr = np.swapaxes(img_arr,1,2)\n# chunked_arr = chunked_arr.reshape(chunked_arr.shape[0],-1)\n# chunked_arr = np.pad(chunked_arr,((0,0),(0,pad)),'constant',constant_values=0)\n# chunked_arr = chunked_arr.reshape(chunked_arr.shape[0],-1,img_arr.shape[1]*chunksize)\n# chunked_arr = np.swapaxes(chunked_arr,1,2)\n# return chunked_arr\n \n def random_crop(self,img_arr,seg_arr):\n false_arr = np.zeros(img_arr.shape[2:4],dtype=bool)\n random_crop_len_y = np.random.uniform(low=0.1,high=1.,size=(1,img_arr.shape[0]))\n random_crop_len_x = np.random.uniform(low=0.4,high=1.,size=(1,img_arr.shape[0]))\n \n random_crop_len = np.concatenate([random_crop_len_y,random_crop_len_x],axis=0)\n \n random_crop_remainder = 1.-random_crop_len\n random_crop_start = (np.random.uniform(low=0.,high=1.,size=(2,img_arr.shape[0])))*random_crop_remainder\n low_crop = np.floor(random_crop_start*np.array(img_arr.shape[2:4])[:,np.newaxis]).astype('int32')\n high_crop = 
np.floor(low_crop+(random_crop_len*np.array(img_arr.shape[2:4])[:,np.newaxis])).astype('int32')\n# random_low_samples = np.random.uniform(low=0.,high=0.5,size=(2,img_arr.shape[0]))\n# low_crop = (random_low_samples*np.array(img_arr.shape[2:4])[:,np.newaxis]).astype('int32')\n# remainder = np.array(img_arr.shape[2:4])[:,np.newaxis]-low_crop\n# random_high_samples = np.random.uniform(low=0.5,high=1.,size=(2,img_arr.shape[0]))\n# high_crop = np.floor(random_high_samples*remainder).astype('int32')+low_crop\n out_arr = []\n out_seg_arr = []\n center = (img_arr.shape[2]//2,img_arr.shape[3]//2)\n for t in range(img_arr.shape[0]):\n mask = copy.copy(false_arr)\n working_arr = copy.copy(img_arr[t,0,:,:])\n working_seg_arr = copy.copy(seg_arr[t,:,:])\n\n dim_0_range = (high_crop[0,t] - low_crop[0,t])\n dim_1_range = high_crop[1,t] - low_crop[1,t]\n top_left = (center[0]-dim_0_range//2,center[1]-dim_1_range//2)\n\n dim_0_maxscale = img_arr.shape[2]/dim_0_range\n dim_1_maxscale = img_arr.shape[3]/dim_1_range\n\n dim_0_scale = np.clip(np.random.normal(loc=1.0,scale=0.1),0.8,dim_0_maxscale)\n dim_1_scale = np.clip(np.random.normal(loc=1.0,scale=0.1),0.8,dim_1_maxscale)\n\n rescaled_img = sk.transform.rescale(working_arr[low_crop[0,t]:high_crop[0,t],low_crop[1,t]:high_crop[1,t]],(dim_0_scale,dim_1_scale),preserve_range=True).astype(int)\n rescaled_seg = (sk.transform.rescale(working_seg_arr[low_crop[0,t]:high_crop[0,t],low_crop[1,t]:high_crop[1,t]]==1,(dim_0_scale,dim_1_scale))>0.5).astype(\"int8\")\n rescaled_border = (sk.transform.rescale(working_seg_arr[low_crop[0,t]:high_crop[0,t],low_crop[1,t]:high_crop[1,t]]==2,(dim_0_scale,dim_1_scale))>0.5)\n rescaled_seg[rescaled_border] = 2\n# rot_seg_arr[t,rot_cell] = 1\n# rot_seg_arr[t,rot_border] = 2\n \n# rescaled_seg = (sk.transform.rescale(working_seg_arr[low_crop[0,t]:high_crop[0,t],low_crop[1,t]:high_crop[1,t]],(dim_0_scale,dim_1_scale),preserve_range=True)>0.5)\n\n top_left = (center[0]-rescaled_img.shape[0]//2,center[1]-rescaled_img.shape[1]//2) \n working_arr[top_left[0]:top_left[0]+rescaled_img.shape[0],top_left[1]:top_left[1]+rescaled_img.shape[1]] = rescaled_img\n working_seg_arr[top_left[0]:top_left[0]+rescaled_img.shape[0],top_left[1]:top_left[1]+rescaled_img.shape[1]] = rescaled_seg\n\n mask[top_left[0]:top_left[0]+rescaled_img.shape[0],top_left[1]:top_left[1]+rescaled_img.shape[1]] = True\n working_arr[~mask] = 0\n working_seg_arr[~mask] = False \n \n out_arr.append(working_arr)\n out_seg_arr.append(working_seg_arr)\n out_arr = np.expand_dims(np.array(out_arr),1)\n out_seg_arr = np.array(out_seg_arr)\n# out_seg_arr = np.expand_dims(np.array(out_seg_arr),1)\n# out_arr = np.moveaxis(np.array(out_arr),(0,1,2),(2,0,1))\n# out_seg_arr = np.moveaxis(np.array(out_seg_arr),(0,1,2),(2,0,1))\n return out_arr,out_seg_arr\n \n# def random_crop(self,img_arr,seg_arr):\n# false_arr = np.zeros(img_arr.shape[:2],dtype=bool)\n# random_low_samples = np.random.uniform(low=0.,high=0.8,size=(2,img_arr.shape[-1]))\n# low_crop = (random_low_samples*np.array(img_arr.shape[:2])[:,np.newaxis]).astype('uint16')\n# remainder = np.array(img_arr.shape[:2])[:,np.newaxis]-low_crop\n# random_high_samples = np.random.uniform(low=0.2,high=1.,size=(2,img_arr.shape[-1]))\n# high_crop = np.floor(random_high_samples*remainder).astype('uint16')+low_crop\n# out_arr = []\n# out_seg_arr = []\n# for t in range(img_arr.shape[2]):\n# mask = copy.copy(false_arr)\n# working_arr = copy.copy(img_arr[:,:,t])\n# working_seg_arr = copy.copy(seg_arr[:,:,t])\n# 
mask[low_crop[0,t]:high_crop[0,t],low_crop[1,t]:high_crop[1,t]] = True\n# working_arr[~mask] = 0\n# working_seg_arr[~mask] = False\n# out_arr.append(working_arr)\n# out_seg_arr.append(working_seg_arr)\n# out_arr = np.moveaxis(np.array(out_arr),(0,1,2),(2,0,1))\n# out_seg_arr = np.moveaxis(np.array(out_seg_arr),(0,1,2),(2,0,1))\n# return out_arr,out_seg_arr\n \n def random_x_flip(self,img_arr,seg_arr,p=0.5):\n choices = np.random.choice(np.array([True,False]),size=img_arr.shape[0],p=np.array([p,1.-p]))\n out_img_arr = copy.copy(img_arr)\n out_seg_arr = copy.copy(seg_arr)\n out_img_arr[choices,0,:,:] = np.flip(img_arr[choices,0,:,:],axis=1)\n out_seg_arr[choices,:,:] = np.flip(seg_arr[choices,:,:],axis=1)\n return out_img_arr,out_seg_arr\n def random_y_flip(self,img_arr,seg_arr,p=0.5):\n choices = np.random.choice(np.array([True,False]),size=img_arr.shape[0],p=np.array([p,1.-p]))\n out_img_arr = copy.copy(img_arr)\n out_seg_arr = copy.copy(seg_arr)\n out_img_arr[choices,0,:,:] = np.flip(img_arr[choices,0,:,:],axis=2)\n out_seg_arr[choices,:,:] = np.flip(seg_arr[choices,:,:],axis=2)\n return out_img_arr,out_seg_arr\n \n def change_brightness(self,img_arr,num_control_points=3):\n out_img_arr = copy.copy(img_arr)\n for t in range(img_arr.shape[0]):\n control_points = (np.add.accumulate(np.ones(num_control_points+2))-1.)/(num_control_points+1)\n control_point_locations = (control_points*65535).astype(int)\n orig_locations = copy.copy(control_point_locations)\n random_points = np.random.uniform(low=0,high=65535,size=num_control_points).astype(int)\n sorted_points = np.sort(random_points)\n control_point_locations[1:-1] = sorted_points\n mapping = interpolate.PchipInterpolator(orig_locations, control_point_locations)\n out_img_arr[t,0,:,:] = mapping(img_arr[t,0,:,:])\n return out_img_arr\n \n \n def add_padding(self,img_arr,seg_arr,max_rot=20,min_padding=20):\n hyp_length = np.ceil((img_arr.shape[2]**2+img_arr.shape[3]**2)**(1/2)).astype(int)\n max_rads = ((90-max_rot)/360)*(2*np.pi)\n min_rads = (90/360)*(2*np.pi)\n max_y = np.maximum(np.ceil(hyp_length*np.sin(max_rads)),np.ceil(hyp_length*np.sin(min_rads))).astype(int)\n max_x = np.maximum(np.ceil(hyp_length*np.cos(max_rads)),np.ceil(hyp_length*np.cos(min_rads))).astype(int)\n delta_y = max_y-img_arr.shape[2]\n delta_x = max_x-img_arr.shape[3]\n if delta_x % 2 == 1:\n delta_x+=1\n if delta_y % 2 == 1:\n delta_y+=1\n delta_y = np.maximum(delta_y,2*min_padding)\n delta_x = np.maximum(delta_x,2*min_padding)\n padded_img_arr = np.pad(img_arr, ((0,0),(0,0),(delta_y//2,delta_y//2),(delta_x//2,delta_x//2)), 'constant', constant_values=0)\n padded_seg_arr = np.pad(seg_arr, ((0,0),(delta_y//2,delta_y//2),(delta_x//2,delta_x//2)), 'constant', constant_values=0)\n return padded_img_arr,padded_seg_arr\n \n def translate(self,pad_img_arr,pad_seg_arr,img_arr,seg_arr):\n trans_img_arr = copy.copy(pad_img_arr)\n trans_seg_arr = copy.copy(pad_seg_arr)\n delta_y = pad_img_arr.shape[2] - img_arr.shape[2]\n delta_x = pad_img_arr.shape[3] - img_arr.shape[3]\n for t in range(pad_img_arr.shape[0]):\n trans_y = np.random.randint(-(delta_y//2),high=delta_y//2)\n trans_x = np.random.randint(-(delta_x//2),high=delta_x//2)\n trans_img_arr[t,0,delta_y//2:delta_y//2+img_arr.shape[0],delta_x//2:delta_x//2+img_arr.shape[1]] = 0\n trans_seg_arr[t,delta_y//2:delta_y//2+img_arr.shape[0],delta_x//2:delta_x//2+img_arr.shape[1]] = 0\n trans_img_arr[t,0,delta_y//2+trans_y:delta_y//2+img_arr.shape[0]+trans_y,delta_x//2+trans_x:delta_x//2+img_arr.shape[1]+trans_x] = 
pad_img_arr[t,0,delta_y//2:delta_y//2+img_arr.shape[0],delta_x//2:delta_x//2+img_arr.shape[1]]\n trans_seg_arr[t,delta_y//2+trans_y:delta_y//2+img_arr.shape[0]+trans_y,delta_x//2+trans_x:delta_x//2+img_arr.shape[1]+trans_x] = pad_seg_arr[t,delta_y//2:delta_y//2+img_arr.shape[0],delta_x//2:delta_x//2+img_arr.shape[1]]\n return trans_img_arr,trans_seg_arr\n\n# def add_padding(self,img_arr,seg_arr):\n# hyp_length = np.ceil((img_arr.shape[0]**2+img_arr.shape[1]**2)**(1/2)).astype(int)\n# delta_y = hyp_length-img_arr.shape[0]\n# delta_x = hyp_length-img_arr.shape[1]\n# if delta_x % 2 == 1:\n# delta_x+=1\n# if delta_y % 2 == 1:\n# delta_y+=1\n# padded_img_arr = np.pad(img_arr, ((delta_y//2,delta_y//2),(delta_x//2,delta_x//2),(0,0)), 'constant', constant_values=0)\n# padded_seg_arr = np.pad(seg_arr, ((delta_y//2,delta_y//2),(delta_x//2,delta_x//2),(0,0)), 'constant', constant_values=0)\n# return padded_img_arr,padded_seg_arr\n \n \n def rotate(self,img_arr,seg_arr,max_rot=20):\n rot_img_arr = copy.copy(img_arr)\n rot_seg_arr = copy.copy(seg_arr)\n for t in range(img_arr.shape[0]):\n r = np.random.uniform(low=-max_rot,high=max_rot)\n rot_img_arr[t,0,:,:] = sk.transform.rotate(img_arr[t,0,:,:],r,preserve_range=True).astype(\"int32\")\n rot_seg = (sk.transform.rotate(seg_arr[t,:,:]==1,r)>0.5).astype(\"int8\")\n rot_border = sk.transform.rotate(seg_arr[t,:,:]==2,r)>0.5\n rot_seg[rot_border] = 2\n rot_seg_arr[t,:,:] = rot_seg\n return rot_img_arr,rot_seg_arr\n \n# def rotate(img_arr,seg_arr):\n# rot_img_arr = copy.copy(img_arr)\n# rot_seg_arr = copy.copy(seg_arr)\n# for t in range(img_arr.shape[2]):\n# r = np.random.normal(loc=0,scale=60)\n# rot_img_arr[:,:,t] = sk.transform.rotate(img_arr[:,:,t],r,preserve_range=True).astype(\"uint16\")\n# rot_seg_arr[:,:,t] = sk.transform.rotate(seg_arr[:,:,t],r).astype(bool)\n# return rot_img_arr,rot_seg_arr\n\n# def deform_img_arr(self,img_arr,seg_arr):\n# y_steps = np.linspace(0.,4.,num=img_arr.shape[0])\n# x_steps = np.linspace(0.,4.,num=img_arr.shape[1])\n# grid = np.random.normal(scale=10.,size=(2,4,4))\n# dx = RectBivariateSpline(np.arange(4),np.arange(4),grid[0]).ev(y_steps[np.newaxis,:],x_steps[:,np.newaxis])\n# dy = RectBivariateSpline(np.arange(4),np.arange(4),grid[1]).ev(y_steps[np.newaxis,:],x_steps[:,np.newaxis])\n# y,x = np.meshgrid(np.arange(img_arr.shape[0]), np.arange(img_arr.shape[1]), indexing='ij')\n# indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))\n# def_img_arr = copy.copy(img_arr)\n# def_seg_arr = copy.copy(seg_arr)\n# for t in range(img_arr.shape[2]):\n# def_img_arr[:,:,t] = map_coordinates(img_arr[:,:,t], indices, order=1).reshape(img_arr.shape[:2])\n# def_seg_arr[:,:,t] = map_coordinates(seg_arr[:,:,t], indices, order=1).reshape(seg_arr.shape[:2])\n# def_seg_arr[:,:,t] = sk.morphology.binary_closing(def_seg_arr[:,:,t])\n# return def_img_arr,def_seg_arr\n\n def deform_img_arr(self,img_arr,seg_arr):\n def_img_arr = copy.copy(img_arr)\n def_seg_arr = copy.copy(seg_arr)\n for t in range(img_arr.shape[0]):\n y_steps = np.linspace(0.,4.,num=img_arr.shape[2])\n x_steps = np.linspace(0.,4.,num=img_arr.shape[3])\n grid = np.random.normal(scale=1.,size=(2,4,4))\n dx = RectBivariateSpline(np.arange(4),np.arange(4),grid[0]).ev(y_steps[:,np.newaxis],x_steps[np.newaxis,:])\n dy = RectBivariateSpline(np.arange(4),np.arange(4),grid[1]).ev(y_steps[:,np.newaxis],x_steps[np.newaxis,:])\n y,x = np.meshgrid(np.arange(img_arr.shape[2]), np.arange(img_arr.shape[3]), indexing='ij')\n indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))\n 
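# resample the raw image at the displaced coordinates to apply the elastic warp\n 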
elastic_img = map_coordinates(img_arr[t,0,:,:], indices, order=1).reshape(img_arr.shape[2:4])\n \n def_img_arr[t,0,:,:] = elastic_img\n \n elastic_cell = (map_coordinates(seg_arr[t,:,:]==1, indices, order=1).reshape(seg_arr.shape[1:3])>0.5)\n elastic_cell = sk.morphology.binary_closing(elastic_cell)\n elastic_border = (map_coordinates(seg_arr[t,:,:]==2, indices, order=1).reshape(seg_arr.shape[1:3])>0.5)\n def_seg_arr[t,elastic_cell] = 1\n def_seg_arr[t,elastic_border] = 2\n# elastic_seg = (map_coordinates(seg_arr[t,:,:].astype(bool), indices, order=1).reshape(seg_arr.shape[1:3])>0.5)\n \n \n# elastic_seg = sk.morphology.binary_closing(elastic_seg).astype(\"int8\")\n# def_img_arr[t,0,:,:],def_seg_arr[t,:,:] = (elastic_img,elastic_seg)\n return def_img_arr,def_seg_arr\n\n \n def get_augmented_data(self,img_arr,seg_arr):\n# img_arr = self.make_chunked_kymograph(img_arr,chunksize=self.chunksize)\n# seg_arr = self.make_chunked_kymograph(seg_arr,chunksize=self.chunksize)\n img_arr,seg_arr = self.random_crop(img_arr,seg_arr)\n img_arr,seg_arr = self.random_x_flip(img_arr,seg_arr,p=self.p_flip)\n img_arr,seg_arr = self.random_y_flip(img_arr,seg_arr,p=self.p_flip)\n img_arr = self.change_brightness(img_arr)\n pad_img_arr,pad_seg_arr = self.add_padding(img_arr,seg_arr,max_rot=self.max_rot+5)\n img_arr,seg_arr = self.translate(pad_img_arr,pad_seg_arr,img_arr,seg_arr)\n del pad_img_arr\n del pad_seg_arr\n img_arr,seg_arr = self.rotate(img_arr,seg_arr,max_rot=self.max_rot)\n img_arr,seg_arr = self.deform_img_arr(img_arr,seg_arr)\n img_arr,seg_arr = (img_arr.astype(\"int32\"),seg_arr.astype(\"int8\"))\n return img_arr,seg_arr\n\nclass UNet_Training_DataLoader:\n def __init__(self,headpath,seg_channel,num_trenches_per_fov):\n self.headpath = headpath\n self.kymopath = headpath + \"/kymo\"\n self.segpath = headpath + \"/segmentation\"\n self.nnpath = headpath + \"/nn\"\n self.nnoutputpath = headpath + \"/nnsegmentation\"\n self.metapath = headpath + \"/metadata.hdf5\"\n self.seg_channel = seg_channel\n self.num_trenches_per_fov = num_trenches_per_fov\n \n# self.data_augmentor = data_augmentation(chunksize=chunksize,p_crop=p_crop,p_flip=p_flip,p_brightness=p_brightness,p_rotate=p_rotate,p_deform=p_deform)\n \n def writedir(self,directory,overwrite=False):\n \"\"\"Creates an empty directory at the specified location. 
If a directory is\n already at this location, it will be overwritten if 'overwrite' is true,\n otherwise it will be left alone.\n \n Args:\n directory (str): Path to directory to be overwritten/created.\n overwrite (bool, optional): Whether to overwrite a directory that\n already exists in this location.\n \"\"\"\n if overwrite:\n if os.path.exists(directory):\n shutil.rmtree(directory)\n os.makedirs(directory)\n else:\n if not os.path.exists(directory):\n os.makedirs(directory)\n \n def get_ttv_lists(self,ttv_split):\n \n meta_handle = pandas_hdf5_handler(self.metapath)\n kymo_handle = meta_handle.read_df(\"kymo\")\n fov_arr = kymo_handle.index.get_level_values('fov').unique().values\n trench_dict = {fov:len(kymo_handle.loc[fov].index.get_level_values('trench').unique().values) for fov in fov_arr}\n np.random.shuffle(fov_arr)\n fov_list = list(fov_arr)\n trench_count_arr = np.array([trench_dict[fov] for fov in fov_list])\n trench_count_arr[trench_count_arr>self.num_trenches_per_fov] = self.num_trenches_per_fov\n ttl_counts = np.sum(trench_count_arr)\n\n ttv_split = np.array(ttv_split)\n ttv_counts = (ttv_split*ttl_counts).astype(int)\n ttv_accum = np.add.accumulate(ttv_counts)\n trench_count_accum = np.add.accumulate(trench_count_arr)\n\n train_mask = trench_count_accum<=ttv_accum[0]\n test_mask = (trench_count_accum>ttv_accum[0])&(trench_count_accum<=ttv_accum[1])\n val_mask = trench_count_accum>ttv_accum[1]\n train = [fov for fov,m in zip(fov_list,train_mask) if m]\n test = [fov for fov,m in zip(fov_list,test_mask) if m]\n val = [fov for fov,m in zip(fov_list,val_mask) if m]\n return train,test,val,trench_dict\n\n # NOTE: the span from the train_mask line through the start of merge_fovs was\n # truncated in this record; the bridge above/below is reconstructed from the\n # calls in prepare_training_data and from how merge_fovs uses its locals.\n def merge_fovs(self,filename,fov_list,trench_dict,seg_channel):\n writepath = self.nnpath + \"/\" + filename\n trench_count_arr = np.array([0] + [trench_dict[fov] for fov in fov_list])\n trench_count_arr[trench_count_arr>self.num_trenches_per_fov] = self.num_trenches_per_fov\n trench_count_accum = np.add.accumulate(trench_count_arr)\n ttl_count = np.sum(trench_count_arr) \n \n# num_pos = 0\n# ttl_num = 0\n\n with h5py.File(writepath,\"w\") as outfile:\n for idx,fov in enumerate(fov_list):\n print(fov)\n img_path = self.kymopath + \"/kymo_\" + str(fov) + \".hdf5\"\n seg_path = self.segpath + \"/seg_\" + str(fov) + \".hdf5\"\n\n img_data = []\n seg_data = []\n \n with h5py.File(img_path,\"r\") as imgfile:\n with h5py.File(seg_path,\"r\") as segfile:\n trenchids = list(imgfile.keys())\n shuffle(trenchids)\n trenchids = trenchids[:trench_count_arr[idx+1]]\n \n for i,trenchid in enumerate(trenchids):\n img_arr = imgfile[trenchid+\"/\"+seg_channel][:]\n seg_arr = segfile[trenchid][:]\n# aug_img_arr,aug_seg_arr = self.data_augmentor.get_augmented_data(img_arr,seg_arr)\n# num_pos += np.sum(seg_arr)\n# ttl_num += seg_arr.size\n if idx == 0 and i == 0:\n t_dim = img_arr.shape[2]\n out_shape = (ttl_count*t_dim,1,img_arr.shape[0],img_arr.shape[1])\n chunk_shape = (1,1,img_arr.shape[0],img_arr.shape[1])\n img_handle = outfile.create_dataset(\"img\",out_shape,chunks=chunk_shape,dtype='int32')\n seg_handle = outfile.create_dataset(\"seg\",out_shape,chunks=chunk_shape,dtype='int8')\n \n img_arr = np.moveaxis(img_arr[np.newaxis,:,:,:],(0,1,2,3),(1,2,3,0)) #y,x,t -> k*t,1,y,x\n img_arr = img_arr.astype('int32')\n img_handle[(trench_count_accum[idx]*t_dim)+i*t_dim:(trench_count_accum[idx]*t_dim)+(i+1)*t_dim] = img_arr\n \n seg_arr = np.moveaxis(seg_arr[np.newaxis,:,:,:],(0,1,2,3),(1,2,3,0)) #y,x,t -> k*t,1,y,x\n seg_arr = seg_arr.astype('int8')\n seg_handle[(trench_count_accum[idx]*t_dim)+i*t_dim:(trench_count_accum[idx]*t_dim)+(i+1)*t_dim] = seg_arr\n \n# class_arr = np.array([num_pos, ttl_num])\n# return class_arr\n \n \n def prepare_training_data(self,ttv_split):\n \n self.writedir(self.nnpath,overwrite=False)\n train,test,val,trench_dict = self.get_ttv_lists(ttv_split)\n self.merge_fovs(\"train.hdf5\",train,trench_dict,self.seg_channel)\n print(\"Done writing train.hdf5\")\n self.merge_fovs(\"test.hdf5\",test,trench_dict,self.seg_channel)\n print(\"Done writing test.hdf5\")\n self.merge_fovs(\"val.hdf5\",val,trench_dict,self.seg_channel)\n print(\"Done 
writing val.hdf5\")\n# print(ttl_classes)\n# with open(self.nnpath + \"/classfile.pkl\", 'wb') as outfile:\n# pkl.dump(ttl_classes, outfile)\n print(\"Done writing data\")\n \n \nclass UNet_DataLoader:\n def __init__(self,headpath,seg_channel):\n self.headpath = headpath\n self.kymopath = headpath + \"/kymo\"\n self.segpath = headpath + \"/segmentation\"\n self.nnpath = headpath + \"/nn\"\n self.nnoutputpath = headpath + \"/nnsegmentation\"\n self.metapath = headpath + \"/metadata.hdf5\"\n self.seg_channel = seg_channel\n \n def writedir(self,directory,overwrite=False):\n \"\"\"Creates an empty directory at the specified location. If a directory is\n already at this location, it will be overwritten if 'overwrite' is true,\n otherwise it will be left alone.\n \n Args:\n directory (str): Path to directory to be overwritten/created.\n overwrite (bool, optional): Whether to overwrite a directory that\n already exists in this location.\n \"\"\"\n if overwrite:\n if os.path.exists(directory):\n shutil.rmtree(directory)\n os.makedirs(directory)\n else:\n if not os.path.exists(directory):\n os.makedirs(directory)\n \n def prepare_data(self,fov_num):\n self.writedir(self.nnoutputpath,overwrite=False)\n writepath = self.nnoutputpath + \"/nninput_\" + str(fov_num) + \".hdf5\"\n img_path = self.kymopath + \"/kymo_\" + str(fov_num) + \".hdf5\"\n with h5py.File(writepath,\"w\") as outfile:\n with h5py.File(img_path,\"r\") as infile:\n keys = list(infile.keys())\n ex_data = infile[keys[0]+\"/\"+self.seg_channel]\n out_shape = (len(keys)*ex_data.shape[2],1,ex_data.shape[0],ex_data.shape[1])\n chunk_shape = (1,1,out_shape[2],out_shape[3])\n img_handle = outfile.create_dataset(\"img\",out_shape,chunks=chunk_shape,dtype=float)\n \n for i,trenchid in enumerate(keys):\n img_arr = infile[trenchid+\"/\"+self.seg_channel][:]\n img_arr = np.moveaxis(img_arr,(0,1,2),(1,2,0))\n img_arr = np.expand_dims(img_arr,1)\n img_arr = img_arr.astype(float)\n \n img_handle[i*ex_data.shape[2]:(i+1)*ex_data.shape[2]] = img_arr\n \n def postprocess(self,fov_num,threshold=0.5):\n threshold = 0.5\n nninputpath = self.nnoutputpath + \"/nninput_\" + str(fov_num) + \".hdf5\"\n nnoutputpath = self.nnoutputpath + \"/nnoutput_\" + str(fov_num) + \".hdf5\"\n segpath = self.nnoutputpath + \"/seg_\" + str(fov_num) + \".hdf5\"\n kymopath = self.kymopath + \"/kymo_\" + str(fov_num) + \".hdf5\"\n with h5py.File(kymopath,\"r\") as kymofile:\n trench_num = len(kymofile.keys())\n trenchids = list(kymofile.keys())\n with h5py.File(segpath,\"w\") as outfile:\n with h5py.File(nnoutputpath,\"r\") as infile:\n num_img = infile[\"img\"].shape[0]\n y_shape,x_shape = (infile[\"img\"].shape[2],infile[\"img\"].shape[3])\n timepoints = int(num_img/trench_num)\n for trench in range(trench_num):\n trenchid = trenchids[trench]\n trench_arr = (infile[\"img\"][trench*timepoints:(trench+1)*timepoints,0]>threshold)\n trench_arr = np.moveaxis(trench_arr,(0,1,2),(2,0,1))\n outdset = outfile.create_dataset(trenchid, data=trench_arr, chunks=(y_shape,x_shape,1), dtype=bool)\n os.remove(nninputpath)\n os.remove(nnoutputpath)\n \n \nclass SegmentationDataset(Dataset):\n def __init__(self,filepath,training=False):\n self.filepath = filepath\n self.training = training\n with h5py.File(self.filepath,\"r\") as infile:\n self.shape = infile[\"img\"].shape\n self.img_data = infile[\"img\"][:]\n if self.training:\n self.seg_data = infile[\"seg\"][:]\n def __len__(self):\n with h5py.File(self.filepath,\"r\") as infile:\n out_len = infile[\"img\"].shape[0]\n return out_len\n def 
__getitem__(self,idx):\n if self.training:\n sample = {'img': self.img_data[idx], 'seg': self.seg_data[idx]}\n else:\n sample = {'img': self.img_data[idx]}\n return sample\n# class SegmentationDataset(Dataset):\n# def __init__(self,filepath,training=False):\n# self.filepath = filepath\n# self.training = training\n# with h5py.File(self.filepath,\"r\") as infile:\n# self.shape = infile[\"img\"].shape\n# def __len__(self):\n# with h5py.File(self.filepath,\"r\") as infile:\n# out_len = infile[\"img\"].shape[0]\n# return out_len\n# def __getitem__(self,idx):\n# with h5py.File(self.filepath,\"r\") as infile:\n# if self.training:\n# sample = {'img': infile[\"img\"][idx], 'seg': infile[\"seg\"][idx]}\n# else:\n# sample = {'img': infile[\"img\"][idx]}\n# return sample\n\n#https://github.com/milesial/Pytorch-UNet/blob/master/unet/unet_model.py\n\nclass double_conv(nn.Module):\n '''(Conv => BatchNorm =>ReLU) twice'''\n def __init__(self,in_ch,out_ch):\n super().__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(in_ch, out_ch, 3, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_ch, out_ch, 3, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True)\n )\n \n def forward(self,x):\n x = self.conv(x)\n return x\n\nclass inconv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super().__init__()\n self.conv = double_conv(in_ch, out_ch)\n\n def forward(self, x):\n x = self.conv(x)\n return x\n \nclass down(nn.Module):\n def __init__(self, in_ch, out_ch):\n super().__init__()\n self.downconv = nn.Sequential(\n nn.MaxPool2d(2),\n double_conv(in_ch, out_ch))\n\n def forward(self, x):\n x = self.downconv(x)\n return x\n \nclass up(nn.Module):\n def __init__(self, in_ch, out_ch, bilinear=False):\n super().__init__()\n\n # would be a nice idea if the upsampling could be learned too,\n # but my machine do not have enough memory to handle all those weights\n if bilinear:\n self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n else:\n self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)\n self.conv = double_conv(in_ch, out_ch)\n \n def forward(self, x1, x2):\n x1 = self.up(x1)\n \n # input is CHW\n diffY = x2.size()[2] - x1.size()[2]\n diffX = x2.size()[3] - x1.size()[3]\n\n x1 = F.pad(x1, (diffX // 2, diffX - diffX//2,\n diffY // 2, diffY - diffY//2))\n \n # for padding issues, see \n # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a\n # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd\n\n x = torch.cat([x2, x1], dim=1)\n x = self.conv(x)\n return x\nclass outconv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super().__init__()\n self.conv = nn.Conv2d(in_ch, out_ch, 1)\n\n def forward(self, x):\n x = self.conv(x)\n return x\n \nclass UNet(nn.Module):\n def __init__(self,n_channels,n_classes,layers=3,hidden_size=64,dropout=0.,withsoftmax=False):\n super().__init__()\n self.inc = inconv(n_channels, hidden_size)\n self.downlist = nn.ModuleList([down(hidden_size*(2**i), hidden_size*(2**(i+1))) for i in range(0,layers-1)] + [down(hidden_size*(2**(layers-1)), hidden_size*(2**(layers-1)))])\n self.uplist = nn.ModuleList([up(hidden_size*(2**i), hidden_size*(2**(i-2))) for i in reversed(range(2,layers+1))] + [up(hidden_size*2, hidden_size)])\n self.outc = outconv(hidden_size, n_classes)\n self.drop = nn.Dropout(p=dropout)\n self.withsoftmax = withsoftmax\n def uniforminit(self):\n for param in self.named_parameters():\n 
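A quick self-contained shape check of the pad-and-concatenate step in up.forward above; the tensor sizes are dummies, with 13 vs 14 chosen to force a one-pixel mismatch:

import torch
import torch.nn.functional as F

x1 = torch.zeros(1, 8, 13, 13)   # upsampled decoder features
x2 = torch.zeros(1, 8, 14, 14)   # encoder skip connection

diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
                diffY // 2, diffY - diffY // 2))

x = torch.cat([x2, x1], dim=1)   # channels stack; spatial dims now match
assert x.shape == (1, 16, 14, 14)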
param[1].data.uniform_(-0.05,0.05)\n def forward(self, x):\n xlist = [self.inc(x)]\n for item in self.downlist:\n xlist.append(item(xlist[-1]))\n x = xlist[-1]\n x = self.drop(x)\n for i,item in enumerate(self.uplist):\n x = item(x, xlist[-(i+2)])\n x = self.outc(x)\n if self.withsoftmax:\n x = F.softmax(x,dim=1)\n return x\n \n \nclass UNet_Trainer:\n \n def __init__(self,headpath,layers=3,hidden_size=64,lr=0.005,momentum=0.9,weight_decay=0.0005,dropout=0.,batch_size=100,gpuon=False,saveparams=False,writetotb=False,augment=True,p_flip=0.5,max_rot=15,min_padding=20):\n self.headpath = headpath\n self.nnpath = headpath + \"/nn\"\n self.nnoutputpath = headpath + \"/nnsegmentation\"\n \n self.batch_size = batch_size\n self.gpuon = gpuon\n self.saveparams = saveparams\n self.writetotb = writetotb\n self.augment = augment\n \n if augment:\n self.data_augmentor = data_augmentation(p_flip=p_flip,max_rot=max_rot,min_padding=min_padding)\n \n self.layers = layers\n self.hidden_size = hidden_size\n self.dropout = dropout\n self.lr = lr\n self.momentum = momentum\n \n if writetotb:\n self.writer = SummaryWriter('runs/layers='+str(layers)+'_hidden_size='+str(hidden_size)+'_dropout='+str(dropout)+'_lr='+str(lr)+'_momentum='+str(momentum))\n \n self.model = UNet(1,3,layers=layers,hidden_size=hidden_size,dropout=dropout)\n self.model.uniforminit()\n if gpuon:\n self.model = self.model.cuda()\n \n# with open(self.nnpath + \"/classfile.pkl\", 'rb') as outfile:\n# class_arr = pkl.load(outfile)\n \n# pos_ex = class_arr[0]\n# neg_ex = class_arr[1] - pos_ex\n# ratio = torch.Tensor(np.array(neg_ex/pos_ex))\n# print(ratio)\n# self.loss_fn = nn.BCEWithLogitsLoss(reduction='none', pos_weight=ratio)\n self.optimizer = optim.SGD(self.model.parameters(), lr = lr,momentum=momentum,weight_decay=weight_decay)\n \n def load_model(self,paramspath):\n if self.gpuon:\n device = torch.device(\"cuda\")\n self.model.load_state_dict(torch.load(paramspath))\n else:\n device = torch.device('cpu')\n self.model.load_state_dict(torch.load(paramspath, map_location=device))\n \n def train(self,x,y,weights):\n self.optimizer.zero_grad()\n fx = self.model.forward(x)\n nll = F.cross_entropy(fx,y,reduction='none',weight=weights)\n mean_nll = torch.mean(nll)\n mean_nll.backward()\n self.optimizer.step()\n return mean_nll\n\n def test(self,x,y,weights):\n fx = self.model.forward(x)\n nll = F.cross_entropy(fx,y,reduction='none',weight=weights)\n nll = torch.sum(nll)\n return nll\n\n def perepoch(self,e,train_iter,val_iter,train_data_shape,val_data_shape):\n \n \n# pos_ex = class_arr[0]\n# neg_ex = class_arr[1] - pos_ex\n# ratio = torch.Tensor(np.array(neg_ex/pos_ex))\n# print(ratio)\n# self.loss_fn = nn.BCEWithLogitsLoss(reduction='none', pos_weight=ratio)\n \n \n print('=======epoch ' + str(e) + '=======')\n self.model.train()\n num_train_batches = len(train_iter)\n for i,b in enumerate(train_iter):\n img_arr,seg_arr = (b['img'].numpy(),b['seg'].numpy())\n seg_arr = seg_arr[:,0] #dirty fix move to data prep when scratch stabalizes\n for t in range(seg_arr.shape[0]):\n# border = sk.morphology.binary_dilation(seg_arr[t])^sk.morphology.binary_erosion(seg_arr[t])\n binary = seg_arr[t].astype(bool)\n dilated = sk.morphology.binary_dilation(binary)\n dilated = sk.morphology.binary_dilation(dilated)\n border = dilated^binary\n# inner = sk.morphology.binary_erosion(seg_arr[t])^seg_arr[t]\n# border = outer+inner\n seg_arr[t,border] = 2\n\n \n \n if self.augment:\n img_arr,seg_arr = self.data_augmentor.get_augmented_data(img_arr,seg_arr)\n# else:\n# 
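The loss used in train()/test() above is plain weighted cross-entropy; a minimal repro with random logits (batch size, class count and weights invented for the demo):

import torch
import torch.nn.functional as F

fx = torch.randn(4, 3, 32, 32)            # logits: (batch, 3 classes, H, W)
y = torch.randint(0, 3, (4, 32, 32))      # integer class map
weights = torch.tensor([1.0, 5.0, 10.0])  # up-weight the rarer classes

nll = F.cross_entropy(fx, y, reduction='none', weight=weights)
mean_nll = torch.mean(nll)  # train() backpropagates this
sum_nll = torch.sum(nll)    # test() accumulates the sum instead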
img_arr,seg_arr = (b['img'].numpy(),b['seg'].numpy())\n# plt.imshow(seg_arr[0])\n# plt.show()\n \n background_count = int(np.sum(seg_arr==0))\n cell_count = int(np.sum(seg_arr==1))\n border_count = int(np.sum(seg_arr==2))\n ttl_count = seg_arr.size\n \n weights = torch.Tensor(np.array([ttl_count/(background_count+1),ttl_count/(cell_count+1),ttl_count/(border_count+1)]))\n \n x = torch.Tensor(img_arr)\n y = torch.LongTensor(seg_arr)\n if self.gpuon:\n x = x.cuda()\n y = y.cuda()\n weights = weights.cuda()\n mean_nll = self.train(x,y,weights)\n mean_nll = mean_nll.cpu().data.numpy()\n if self.writetotb:\n self.writer.add_scalar('Mean Train NLL', mean_nll, i+e*num_train_batches)\n if i%25 == 0:\n print('train_iter: ' + str(i) + ' Mean Train NLL: ' + str(mean_nll))\n if (i%100 == 0) and self.saveparams:\n torch.save(self.model.state_dict(), self.nnpath + \"/model_layers=\" + str(self.layers) + \"_hidden_size=\" + str(self.hidden_size) +\\\n \"_dropout=\" + str(self.dropout) + '_lr=' + str(self.lr) + '_momentum=' + str(self.momentum) + \"_epoch_\" + str(e) + \"_step_\" + str(i) +\".pt\")\n del x\n del y\n del mean_nll\n torch.cuda.empty_cache()\n self.model.eval()\n total_test_nll = 0.\n for i,b in enumerate(val_iter):\n img_arr,seg_arr = (b['img'].numpy(),b['seg'].numpy())\n seg_arr = seg_arr[:,0]\n for t in range(seg_arr.shape[0]):\n# border = sk.morphology.binary_dilation(seg_arr[t])^sk.morphology.binary_erosion(seg_arr[t])\n binary = seg_arr[t].astype(bool)\n dilated = sk.morphology.binary_dilation(binary)\n dilated = sk.morphology.binary_dilation(dilated)\n border = dilated^binary\n seg_arr[t,border] = 2\n \n background_count = int(np.sum(seg_arr==0))\n cell_count = int(np.sum(seg_arr==1))\n border_count = int(np.sum(seg_arr==2))\n ttl_count = seg_arr.size\n \n weights = torch.Tensor(np.array([ttl_count/(background_count+1),ttl_count/(cell_count+1),ttl_count/(border_count+1)]))\n\n x = torch.Tensor(img_arr)\n y = torch.LongTensor(seg_arr)\n if self.gpuon:\n x = x.cuda()\n y = y.cuda()\n weights = weights.cuda()\n nll = self.test(x,y,weights)\n total_test_nll += nll.cpu().data.numpy()\n del x\n del y\n del nll\n torch.cuda.empty_cache()\n avgtestnll = total_test_nll/(np.prod(np.array(val_data_shape)))\n if self.writetotb:\n self.writer.add_scalar('Mean Test NLL', avgtestnll, e)\n print('Mean Test NLL: ' + str(avgtestnll))\n return avgtestnll\n \n def train_model(self,numepochs,train_data,val_data):\n train_data_shape = train_data.shape\n val_data_shape = val_data.shape\n for e in range(0,numepochs):\n train_iter = DataLoader(train_data,batch_size=self.batch_size,shuffle=True,num_workers=4)\n val_iter = DataLoader(val_data,batch_size=self.batch_size,shuffle=True,num_workers=4)\n self.perepoch(e,train_iter,val_iter,train_data_shape,val_data_shape)\n if self.saveparams:\n torch.save(self.model.state_dict(), self.nnpath + \"/model_layers=\" + str(self.layers) + \"_hidden_size=\" + str(self.hidden_size) +\\\n \"_dropout=\" + str(self.dropout) + '_lr=' + str(self.lr) + '_momentum=' + str(self.momentum) + \"_epoch_\" + str(e)+\".pt\")\n \n def get_test_pr(self,test_data,samples=1000):\n test_data_shape = test_data.shape\n test_iter = DataLoader(test_data,batch_size=samples,shuffle=True,num_workers=4)\n for i,b in enumerate(test_iter):\n x = Variable(b['img'].float())\n y = b['seg'].float().numpy()\n if self.gpuon:\n x = x.cuda()\n fx = self.model.forward(x).cpu().data.numpy()\n y_true = y.flatten()\n y_scores = fx.flatten()\n precision, recall, thresholds = precision_recall_curve(y_true, 
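The border class (label 2) in perepoch is built by dilating the cell mask twice and XOR-ing with the original; a standalone toy version of just that step:

import numpy as np
from skimage.morphology import binary_dilation

seg = np.zeros((9, 9), dtype=int)
seg[3:6, 3:6] = 1                  # one square "cell"

binary = seg.astype(bool)
dilated = binary_dilation(binary)
dilated = binary_dilation(dilated)
border = dilated ^ binary          # ring up to two pixels thick around the cell

seg[border] = 2                    # 0 = background, 1 = cell, 2 = border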
y_scores)\n break\n return precision, recall, thresholds\n \nclass UNet_Segmenter:\n \n def __init__(self,headpath,paramspath,layers=3,hidden_size=64,batch_size=100,gpuon=False):\n \n \n self.headpath = headpath\n self.paramspath = paramspath\n self.nnoutputpath = headpath + \"/nnsegmentation\"\n self.gpuon = gpuon\n self.layers = layers\n self.hidden_size = hidden_size\n self.batch_size = batch_size\n \n def segment(self,fov_num):\n torch.cuda.empty_cache()\n self.model = UNet(1,1,layers=self.layers,hidden_size=self.hidden_size)\n \n if self.gpuon:\n device = torch.device(\"cuda\")\n self.model.load_state_dict(torch.load(self.paramspath))\n self.model.to(device)\n else:\n device = torch.device('cpu')\n self.model.load_state_dict(torch.load(self.paramspath, map_location=device))\n self.model.eval()\n \n inputpath = self.nnoutputpath + \"/nninput_\" + str(fov_num) + \".hdf5\"\n outputpath = self.nnoutputpath + \"/nnoutput_\" + str(fov_num) + \".hdf5\"\n print(inputpath)\n with h5py.File(inputpath,\"r\") as infile:\n out_shape = infile[\"img\"].shape\n chunk_shape = infile[\"img\"].chunks\n data = SegmentationDataset(inputpath,training=False)\n data_iter = DataLoader(data,batch_size=self.batch_size,shuffle=False)\n with h5py.File(outputpath,\"w\") as outfile:\n img_handle = outfile.create_dataset(\"img\",out_shape,chunks=chunk_shape,dtype=float)\n for i,b in enumerate(data_iter):\n x = Variable(b['img'].float())\n if self.gpuon:\n x = x.cuda()\n fx = self.model.forward(x)\n img_handle[i*self.batch_size:(i+1)*self.batch_size] = fx.cpu().data.numpy()\n def test(self):\n print(\"test\")","sub_path":"trenchripper/unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":40930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"596403154","text":"import os\nimport datetime\nimport jwt\nfrom functools import wraps\nfrom app.models import Posts, Users, Likes, Follows\nfrom app.forms import LoginForm, PostsForm, RegistrationForm\nfrom app import app, db, filefolder, token_key\nfrom flask import session, g, render_template, request, redirect, url_for, flash, jsonify, session\nfrom werkzeug.utils import secure_filename\n\n\n@app.route('/')\ndef index():\n \"\"\"Render website's initial page and let VueJS take over.\"\"\"\n return render_template('index.html')\n\n\ndef auth_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n auth = request.headers.get('Authorization', None)\n if not auth:\n return jsonify({'code': 'authorization_header_missing', 'description': 'Authorization header is expected'}), 401\n sections = auth.split()\n if sections[0].lower() != 'bearer':\n return jsonify({'code': 'invalid_header', 'description': 'Authorization header must start with bearer'}), 401 \n elif len(sections) == 1:\n return jsonify({'code': 'invalid_header', 'description': 'Token not found'}), 401\n elif len(sections) > 2:\n return jsonify({'code': 'invalid_header', 'description': 'Authorization header must bearer + \\s + token'}), 401\n \n token = sections[1]\n try:\n ids = jwt.decode(token, token_key)\n get_user_info = Users.query.filter_by(id=ids['user_id']).first()\n\n except jwt.ExpiredSignature:\n return jsonify({'code': 'expired_token', 'description': 'Your token is expired'}), 401\n except jwt.DecodeError:\n return jsonify({'code': 'token_invalid_signature', 'description': 'Token signature is invalid'}), 401\n\n g.current_user = user = ids['user_id']\n return f(*args, **kwargs)\n return decorated\n\n\n@app.route('/api/users/register', 
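The auth decorator above rests on a PyJWT encode/decode round trip; a minimal sketch (PyJWT 2.x semantics assumed: encode returns a str and decode requires an explicit algorithms list; the secret is a placeholder):

import jwt

token_key = 'not-a-real-secret'  # placeholder, not the app's key
token = jwt.encode({'user_id': 42}, token_key, algorithm='HS256')

payload = jwt.decode(token, token_key, algorithms=['HS256'])
assert payload['user_id'] == 42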
methods=['POST'])\ndef register():\n    \"\"\"Accepts user information and saves it to the database\"\"\"\n    form = RegistrationForm()\n    if request.method == \"POST\" and form.validate_on_submit():\n        username = request.form['username']\n        password = request.form['password']\n        firstname = request.form['first_name']\n        lastname = request.form['last_name']\n        email = request.form['email']\n        location = request.form['location']\n        biography = request.form['biography']\n        photo = request.files['profile_photo']\n        date_now = datetime.datetime.now()\n        photo_file = secure_filename(photo.filename)\n        user = Users(firstname = firstname, lastname = lastname, email = email, location = location, biography = biography, profile_photo = photo_file, joined_on = date_now, username = username, password = password)\n        db.session.add(user)\n        db.session.commit()\n        photo.save(os.path.join(filefolder, photo_file))\n        info = [{\"message\": \"User successfully registered\"}]\n        return jsonify(result=info)\n    all_errors = form_errors(form)\n    error = [{'error': all_errors}]\n    return jsonify(errors=error)\n\n\n@app.route('/api/auth/login', methods=['POST'])\ndef login():\n    \"\"\"Accepts login credentials as username and password\"\"\"\n    form = LoginForm()\n    if request.method == \"POST\" and form.validate_on_submit():\n        username = request.form['user_name']\n        password = request.form['password']\n\n        user = Users.query.filter_by(username = username, password = password).first()\n        if user is None:\n            return jsonify(errorM=\"Incorrect username or password\")\n\n        ids = {'user_id': user.id}\n        token = jwt.encode(ids, token_key)\n        session['userid'] = user.id\n        return jsonify(info={'token': token, 'userid': user.id}, message = \"User logged in!\")\n    all_errors = form_errors(form)\n    error = [{'error': all_errors}]\n    return jsonify(errors=error)\n\n\n@app.route('/api/auth/logout', methods=['GET'])\n@auth_required\ndef logout():\n    \"\"\"Logout a user\"\"\"\n    g.current_user = None\n    session.pop('userid', None)\n    return jsonify(message = \"You have been logged out!\")\n\n\n@app.route('/api/users/<user_id>/posts', methods=['POST'])\ndef new_user_post(user_id):\n    \"\"\"Used for adding posts to the user's feed\"\"\"\n    form = PostsForm()\n    if request.method == \"POST\" and form.validate_on_submit():\n        caption = request.form['caption']\n        photo_posted = request.files['photo']\n        date_now = datetime.datetime.now()\n        photo_file = secure_filename(photo_posted.filename)\n        post = Posts(user_id = user_id, caption = caption, created_on = date_now, photo_posted = photo_file)\n        db.session.add(post)\n        db.session.commit()\n        photo_posted.save(os.path.join(filefolder, photo_file))\n        info = [{\"message\": \"Post successfully created\"}]\n        return jsonify(result=info)\n\n\n@app.route('/post/<post_id>', methods=[\"GET\", \"POST\"])\ndef post(post_id):\n    post = g.current_user\n    return render_template('post.html', post=post)\n\n\n@app.route('/api/users/<user_id>/posts', methods=['GET'])\ndef get_post(user_id):\n    \"\"\"Returns a user's posts\"\"\"\n    if request.method == \"GET\":\n        user = Users.query.filter_by(id=user_id).first()\n        if not user:\n            return jsonify({'message': 'no user found'})\n        user_posts = Posts.query.filter_by(user_id=user_id).all()\n        info = []\n        for user_post in user_posts:\n            post_info = {'id': user_post.id, 'user_id': user_post.user_id, 'photo': user_post.photo, 'caption': user_post.caption, 'created_on': user_post.created_on}\n            info.append(post_info)\n        return jsonify(data=info)
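For reference, the upload pattern the register/new-post handlers follow, reduced to a self-contained sketch (the 'uploads' folder and route are hypothetical): uploaded files arrive on request.files, and secure_filename sanitises the client-supplied name before it touches the filesystem.

import os
from flask import Flask, request
from werkzeug.utils import secure_filename

app = Flask(__name__)

@app.route('/upload', methods=['POST'])
def upload():
    photo = request.files['profile_photo']         # werkzeug FileStorage object
    filename = secure_filename(photo.filename)     # strips path tricks like '../'
    photo.save(os.path.join('uploads', filename))  # hypothetical target folder
    return filename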
\n\n\n@app.route('/api/users/<user_id>/', methods=[\"GET\"])\n@auth_required\ndef get_user(user_id):\n    # Database for all post\n    user = Users.query.filter_by(id=user_id).first()\n    info = []\n\n    # House dictionary\n    if (int(user_id) == session['userid']):\n        join = user.joined_on.strftime(\"%B %Y\")\n        user_info= {\"userid\": user.id, \"username\": user.username, \"firstname\": user.firstname, \"lastname\": user.lastname, \"email\": user.email, \"location\": user.location, \"biography\": user.biography,\"photo\": user.profile_photo, \"joined_on\": join}\n        info.append(user_info)\n        return jsonify(profile = info, is_user = True)\n    join = user.joined_on.strftime(\"%B %Y\")\n    user_info= {\"userid\": user.id, \"username\": user.username, \"firstname\": user.firstname, \"lastname\": user.lastname, \"email\": user.email, \"location\": user.location, \"biography\": user.biography,\"photo\": user.profile_photo, \"joined_on\": join}\n    info.append(user_info)\n    return jsonify(profile = info)\n\n\n@app.route('/api/posts/', methods=[\"GET\"])\n@auth_required\ndef all_posts():\n    # Database for all post\n    posts = Posts.query.order_by(Posts.created_on.desc()).all()\n\n    # House dictionary\n    info = []\n    for post in posts:\n        user = Users.query.filter_by(id = post.user_id).first()\n        like = Likes.query.filter_by(post_id = post.id).all()\n        number_of_likes = []\n        for likes in like:\n            number = {'test': 'counted'}\n            number_of_likes.append(number)\n        liked = Likes.query.filter_by(user_id = session['userid'], post_id = post.id).first()\n        if (liked is None):\n            like_check = False\n        else:\n            like_check = True\n        date_posted = post.created_on.strftime(\"%d %b %Y\")\n        posted = {\"postid\": post.id, \"userid\": post.user_id, \"username\": user.username, \"profile_photo\": user.profile_photo, \"photo\": post.photo, \"caption\": post.caption, \"created_on\": date_posted, \"likes\": number_of_likes, \"like_check\": like_check}\n        info.append(posted)\n    return jsonify(data = info)\n\n\n\n@app.route('/api/users/<user_id>/followID', methods=[\"GET\"])\n@auth_required\ndef follow_number(user_id):\n    \"\"\"Shows the amount of followers for a user\"\"\"\n    follows = Follows.query.filter_by(user_id=user_id).all()\n    number_of_followers = []\n    for follow in follows:\n        number = {'test': 'counted'}\n        number_of_followers.append(number)\n    return jsonify(follower=number_of_followers)\n\n\n@app.route('/api/users/<user_id>/following', methods=['POST'])\n@auth_required\ndef follower_check(user_id):\n    \"\"\"Check whether the current user already follows the target user.\"\"\"\n    check = Follows.query.filter_by(user_id = user_id, followID = session['userid']).first()\n    if (check is None):\n        return jsonify(following = False)\n    return jsonify(following = True)\n\n\n@app.route('/api/users/<user_id>/follow', methods=['POST'])\n@auth_required\ndef follow_user(user_id):\n    \"\"\"Saves the user you are following\"\"\"\n    follow = Follows(user_id = user_id, followID = session['userid'])\n    db.session.add(follow)\n    db.session.commit()\n    return jsonify(message= 'You are now following this user')\n\n\n@app.route('/api/posts/<post_id>/like', methods=['GET'])\n@auth_required\ndef like(post_id):\n    \"\"\"Set a like on the current Post by the logged in User\"\"\"\n    check_likes = Likes.query.filter_by(user_id=session['userid'], post_id=post_id).first()\n    if(check_likes is None):\n        like = Likes(user_id = session['userid'], post_id = post_id)\n        db.session.add(like)\n        db.session.commit()\n        return jsonify(message='You have liked a post')\n    return jsonify(DB = 'You already 
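The tallies built from placeholder dicts above can come straight from the database; a fragment assuming the same Flask-SQLAlchemy models and request context (not runnable on its own):

number_of_likes = Likes.query.filter_by(post_id=post.id).count()
number_of_followers = Follows.query.filter_by(user_id=user_id).count()
like_check = Likes.query.filter_by(user_id=session['userid'],
                                   post_id=post.id).first() is not None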
liked the post')\n\n\n@app.route('/<file_name>.txt')\ndef send_text_file(file_name):\n    \"\"\"Send your static text file.\"\"\"\n    file_dot_text = file_name + '.txt'\n    return app.send_static_file(file_dot_text)\n\n\n@app.after_request\ndef add_header(response):\n    \"\"\"\n    Add headers to both force latest IE rendering engine or Chrome Frame,\n    and also to cache the rendered page for 10 minutes.\n    \"\"\"\n    response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'\n    response.headers['Cache-Control'] = 'public, max-age=0'\n    return response\n\n@app.errorhandler(404)\ndef page_not_found(error):\n    \"\"\"Custom 404 page.\"\"\"\n    return render_template('404.html'), 404\n\n\nif __name__ == '__main__':\n    app.run(debug=True, host=\"0.0.0.0\", port=\"8080\")","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"397574454","text":"# Raise from\n# try:\n#     n = 1/0\n#     a = int(n)\n# except Exception as e:\n#     raise ValueError from e # saying that \"e\" caused the ValueError\n# ASSERT\nwhile True:\n    try:\n        num = int(input(\"Digite um número entre 1 e 20: \"))\n    except ValueError:\n        print(\"Erro!! Esse não é um valor válido!\")  # can't reference num here: it was never assigned\n    except:\n        print(\"Entrada Inválida!!!!\")\n    else:\n        break\n\nteste = True\n\nif not 1 <= num <= 20:\n    teste = False\n\nassert teste, num # Python raises an error if these variables don't hold the expected values\n\nif __debug__:\n    if not teste:\n        raise AssertionError(num) # in this case the error is raised if teste == False\n\n# Example of using assert\n\ndef raiz(x):\n    assert x > 0, 'x tem que ser maior que 0'\n    return x ** (1/2)  # parentheses matter: x ** 1/2 would mean (x**1)/2\n\nraiz(-1)","sub_path":"Youtube/Exceções/raise_from_and_assert.py","file_name":"raise_from_and_assert.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"121896540","text":"# coding: utf-8\nfrom nonebot import get_bot\nfrom .sentence import get_sentence\n\n\nbot = get_bot()\n\n\n@bot.on_message()\nasync def _(ctx):\n    msg = ctx.get(\"message\")[0]\n    if msg.get(\"type\") == 'sign':\n        print(dict(msg))\n        data = msg.get(\"data\").get(\"title\")\n        if \"宜\" in data:\n            data = data.strip(\"宜 \")\n            s = await get_sentence(data)\n            if s:\n                await bot.send(ctx, message=s)","sub_path":"coolq/plugins/sign/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"416007773","text":"import serial\nimport VL53L1X\n\nclass LIDAR():\n    dist = 0\n    receivedData = False\n\n    def __init__(self, uart, i2c):\n        self.uart = uart\n        self.i2c = i2c\n        if(self.uart != None and self.i2c == None):\n            self.ser = serial.Serial(self.uart, 115200, timeout=1)\n        else:\n            self.tof = VL53L1X.VL53L1X(i2c_bus=1, i2c_address=i2c)\n            self.tof.open()\n            self.tof.start_ranging(3)\n\n    # Fetch a fresh reading from the LIDAR\n    def getData(self):\n        if(self.uart != None and self.i2c == None):\n            self.ser.reset_input_buffer()\n            while(self.receivedData != True):\n                while(self.ser.in_waiting <= 9):\n                    pass  # wait until a full 9-byte frame is buffered\n                if((b'Y' == self.ser.read()) and (b'Y' == self.ser.read())):\n                    Dist_L = self.ser.read()\n                    Dist_H = self.ser.read()\n                    self.dist = (ord(Dist_H) * 256) + (ord(Dist_L))\n                    for i in range (0,5):\n                        self.ser.read()\n                    self.receivedData = True\n                    break\n        else:\n            self.dist = self.tof.get_distance() # distance in mm\n            self.dist = 
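The UART branch above reads TFmini-style 9-byte frames (0x59 0x59 header, little-endian distance); a standalone parser sketch that also checks the trailing checksum byte (assumed here to be the low 8 bits of the sum of the first eight bytes), run on a canned frame:

def parse_tfmini_frame(frame):
    # returns distance in cm, or None for a malformed frame
    if len(frame) != 9 or frame[0] != 0x59 or frame[1] != 0x59:
        return None
    if (sum(frame[:8]) & 0xFF) != frame[8]:
        return None  # checksum mismatch
    return frame[2] + frame[3] * 256  # Dist_L + Dist_H * 256

frame = bytes([0x59, 0x59, 0x23, 0x01, 0, 0, 0, 0])
frame += bytes([sum(frame) & 0xFF])
print(parse_tfmini_frame(frame))  # 291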
self.dist/10.0\n","sub_path":"07_Code/Lidar.py","file_name":"Lidar.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"113969649","text":"class Solution:\n    \"\"\"Algorithms: Solve 'Connected Cells' Using DFS\"\"\"\n    def __init__(self, m):\n        self.matrix = m\n\n    def get_region_size(self, row, col):\n        if row < 0 or col < 0 or row >=len(self.matrix) or col >= len(self.matrix[0]):\n            return 0\n        if self.matrix[row][col] == 0:\n            return 0\n        \n        # mark that we visited it already\n        self.matrix[row][col] = 0\n        size = 1\n        for r in [row-1, row, row+1]:\n            for c in [col-1, col, col+1]:\n                size += self.get_region_size(r, c)\n        \n        return size\n\n    def get_biggest_region(self):\n        max_region = 0\n        islands=[]\n        for r in range(len(self.matrix)):\n            for c in range(len(self.matrix[0])):\n                if self.matrix[r][c] == 1:\n                    size = self.get_region_size(r,c)\n                    islands.append(size) \n                    max_region = max(max_region, size)\n        \n        return islands\n\n\ndef main():\n    test_matrix = [[0,0,0,1,1,0,0], [0,1,0,0,1,1,0], [1,1,0,1,0,0,1], [0,0,0,0,0,1,0],[1,1,0,0,0,0,0], [0,0,0,1,0,0,0]]\n    \n    for r in test_matrix:\n        print(r)\n\n    sol = Solution(test_matrix)\n    print(sol.get_biggest_region())\n\n\nif __name__== \"__main__\":\n    main()\n\n","sub_path":"islands.py","file_name":"islands.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"286622789","text":"import numpy as np\nfrom control.matlab import step\nfrom control import *\nfrom src.Lab4.ToPlot import plot_trans_func\n\n\ndef do_direct_method(w):\n    \"\"\"\n    Analyzes regulator quality from the step response.\n    parameters in research:\n    - regulation (settling) time\n    - oscillation\n    - overshoot\n    - degree of attenuation\n    :return: keys for handling changes in regulator quality\n    \"\"\"\n\n    def get_degree(ideal, actual):\n        \"\"\"\n        Scores the obtained value on a five-point scale\n        :param ideal: the ideal value\n        :param actual: the actual value\n        :return: the score\n        \"\"\"\n        mas = [0, 1, 1.2, 1.4, 1.6, 1.8, 2]\n        for i in range(len(mas) - 1):\n            if mas[i] * ideal <= actual < mas[i + 1] * ideal:\n                return len(mas) - 2 - i\n            elif actual > mas[-1] * ideal:\n                return 0\n\n    t = np.linspace(0, stop=100, num=2000)\n\n    counter = regulation_time = t_vr_reg = integral_mean = 0\n\n    y1, t1 = step(w, t)\n\n    y1 = list(y1)\n    max_y = max(y1)\n    last_y = y1[-1]\n\n    \"\"\"overshoot and its score\"\"\"\n    overshoot = (max_y - last_y) / last_y\n    key_per = get_degree(27, overshoot)\n\n    \"\"\"value and time of the first maximum, and their scores\"\"\"\n    key_vel_max = get_degree(1.1, max(y1))\n    key_vr_max = get_degree(1, t[y1.index(max(y1))])\n\n    two_maxes = []\n\n    for num in range(1, len(y1) - 1):\n        if y1[num - 1] < y1[num] > y1[num + 1]:\n            two_maxes.append(y1[num])\n            if len(two_maxes) == 10: break\n\n    \"\"\"degree of attenuation and its score\"\"\"\n    if len(two_maxes) <= 1:\n        key_deg = 5.0\n    else:\n        degree_of_attenuation = (1 - two_maxes[1] / two_maxes[0]) * 100\n        if degree_of_attenuation <=0:\n            key_deg = -1\n        else: key_deg = get_degree(1 / 6.6, 1 / degree_of_attenuation)\n\n\n    for i in range(len(y1)):\n        if 0.95 * last_y < y1[i] < 1.05 * last_y:\n            \"\"\"\n            counter counts consecutive points inside the steady-state band;\n            it removes the chance of wrongly accepting a zero stretch of an\n            oscillating response\n            \"\"\"\n            counter += 1\n            if counter == 20:  # the response has stayed inside the band long enough to call it settled
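Several of the hand-rolled metrics above (settling time, overshoot, peak value and peak time) are also available from python-control's step_info; a minimal sketch on a toy underdamped system:

import control

w = control.tf([1], [1, 0.4, 1])  # toy system: 1 / (s^2 + 0.4s + 1)
info = control.step_info(w)
print(info['Overshoot'], info['SettlingTime'], info['PeakTime'])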
\n                regulation_time = t[i]\n                # index in the t array of the regulation-time value\n                t_vr_reg = i\n                break\n        else:  # the response has not reached steady state yet\n            counter = 0\n\n    \"\"\"regulation-time score\"\"\"\n    key_reg = get_degree(15, regulation_time)\n\n    \"\"\"oscillation-index score\"\"\"\n    if len(two_maxes) <= 1:\n        key_koleb = 5.0\n    else:\n        koleb = two_maxes[1] / two_maxes[0] * 100\n        key_koleb = get_degree(1.19, koleb)\n\n\n\n    \"\"\"integral criterion and its score\"\"\"\n    for i in range(0, t_vr_reg):\n        integral_mean = integral_mean + abs(y1[t_vr_reg] - y1[i]) * t[1]\n    key_int = get_degree(0.3, integral_mean)\n\n\n    # check the system for stability\n    poles, zeros = pzmap(w, Plot=False)\n    if not is_sustainable(poles):\n        return [-100]\n\n    return [key_koleb, key_reg, key_per, key_deg,\n            key_vel_max, key_vr_max, key_int]\n\n\ndef is_sustainable(poles):\n    \"\"\"\n    Determines whether the control system is stable\n    :param poles: the system poles\n    :return: true/false\n    \"\"\"\n\n    boo = True\n\n    for pole in poles:\n        if pole.real > 0:\n            boo = False\n            break\n\n    return boo","sub_path":"src/Lab4/ToAnalyze.py","file_name":"ToAnalyze.py","file_ext":"py","file_size_in_byte":4086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"608430055","text":"\"\"\"empty message\n\nRevision ID: cb30c6e79c0e\nRevises: 07e934fa6ed2\nCreate Date: 2017-02-06 13:26:06.015435\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'cb30c6e79c0e'\ndown_revision = '07e934fa6ed2'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('users', sa.Column('roles', sa.String(length=64), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
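is_sustainable above is the standard left-half-plane pole test; with numpy it collapses to one line (the strict inequality treats poles on the imaginary axis as unstable, slightly stricter than the loop's real > 0 check):

import numpy as np

poles = np.array([-1.0 + 2j, -1.0 - 2j, -0.5])
is_stable = bool(np.all(poles.real < 0))  # True here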
###\n op.drop_column('users', 'roles')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/cb30c6e79c0e_.py","file_name":"cb30c6e79c0e_.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"566494925","text":"from django.urls import path\n# from EO.views import common, user, note\nfrom EO.AppApi import views, Light, graduationPhoto\n# from django.views.generic import RedirectView\n\napp_name = \"APPApi\"\n\nurlpatterns = [\n path('login/', views.login_check),\n path('register/', views.register),\n path('note/group/', views.get_note_group),\n path('note/show/', views.note_show),\n path('note/detail/', views.note_detail),\n # path('apk/', views.download),\n path('update/getVersion', views.update_get_version),\n # path('520Light/', Light.stop, name='520'),\n # path('520Light/bullet', Light.bullet, name='520bullet'),\n # path('graduationPhoto/', graduationPhoto.test, name='graduationPhoto_test'),\n # path('graduationPhoto/index/', graduationPhoto.index, name='graduationPhoto_index'),\n path('graduationBill/', graduationPhoto.mybill, name='graduationPhoto_mybill'),\n path('graduationBill/login/', graduationPhoto.login, name='graduationPhoto_login'),\n path('graduationBill/logout/', graduationPhoto.student_logout, name='graduationPhoto_logout'),\n # path('graduationPhoto/menu/', graduationPhoto.menu, name='graduationPhoto_menu'),\n # path('graduationPhoto/getbody/', graduationPhoto.getbody, name='graduationPhoto_getbody'),\n # path('graduationPhoto/get_photo/', graduationPhoto.get_photo, name='graduationPhoto_get_photo'),\n path('graduationBill/init_student/', graduationPhoto.init_student, name='graduationPhoto_init_student'),\n\n]\n","sub_path":"EO/AppApi/apiUrls.py","file_name":"apiUrls.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"415411174","text":"import jwt\nfrom datetime import datetime\n\nsecret = '16+1zh6$-@q6&^ov8q7lk#(vak*!l+tog&44&wloh%*9yvc9-#'\n\n\ndef encode(params):\n now = datetime.now()\n date_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\n params.update({\"time\": date_time})\n jwt_token = jwt.encode(params, secret, algorithm='HS256')\n return jwt_token\n\n","sub_path":"neurex_app/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"534839000","text":"# Copyright (c) 2019 UniMoRe, Matteo Spallanzani\n# Copyright (c) 2019 ETH Zurich, Lukas Cavigelli\n\nimport torch\nimport torch.nn as nn\n\n\n# In order for the baselines to be launched with the same logic as quantized\n# models, an empty quantization scheme and an empty thermostat schedule need\n# to be configured.\n# Use the following templates for the `net` and `thermostat` configurations:\n#\n# \"net\": {\n# \"class\": \"AlexNetBaseline\",\n# \"params\": {\"capacity\": 1},\n# \"pretrained\": null,\n# \"loss_fn\": {\n# \"class\": \"CrossEntropyLoss\",\n# \"params\": {}\n# }\n# }\n#\n# \"thermostat\": {\n# \"class\": \"AlexNetBaseline\",\n# \"params\": {\n# \"noise_schemes\": {},\n# \"bindings\": []\n# }\n# }\n\nclass AlexNetBaseline(nn.Module):\n \"\"\"AlexNet Convolutional Neural Network.\"\"\"\n def __init__(self, capacity):\n super().__init__()\n c0 = 3\n c1 = int(64 * capacity)\n c2 = int(64 * 3 * capacity)\n c3 = int(64 * 6 * capacity)\n c4 = int(64 * 4 * capacity)\n c5 = 256\n nh = 4096\n 
# convolutional layers\n        self.phi1_conv = nn.Conv2d(c0, c1, kernel_size=11, stride=4, padding=2, bias=False)\n        self.phi1_mp = nn.MaxPool2d(kernel_size=3, stride=2)\n        self.phi1_bn = nn.BatchNorm2d(c1)\n        self.phi1_act = nn.ReLU6()\n        self.phi2_conv = nn.Conv2d(c1, c2, kernel_size=5, padding=2, bias=False)\n        self.phi2_mp = nn.MaxPool2d(kernel_size=3, stride=2)\n        self.phi2_bn = nn.BatchNorm2d(c2)\n        self.phi2_act = nn.ReLU6()\n        self.phi3_conv = nn.Conv2d(c2, c3, kernel_size=3, padding=1, bias=False)\n        self.phi3_bn = nn.BatchNorm2d(c3)\n        self.phi3_act = nn.ReLU6()\n        self.phi4_conv = nn.Conv2d(c3, c4, kernel_size=3, padding=1, bias=False)\n        self.phi4_bn = nn.BatchNorm2d(c4)\n        self.phi4_act = nn.ReLU6()\n        self.phi5_conv = nn.Conv2d(c4, c5, kernel_size=3, padding=1, bias=False)\n        self.phi5_mp = nn.MaxPool2d(kernel_size=3, stride=2)\n        self.phi5_bn = nn.BatchNorm2d(c5)\n        self.phi5_act = nn.ReLU6()\n        # fully connected layers\n        self.phi6_fc = nn.Linear(c5 * 6 * 6, nh, bias=False)\n        self.phi6_bn = nn.BatchNorm1d(nh)\n        self.phi6_act = nn.ReLU6()\n        self.phi7_fc = nn.Linear(nh, nh, bias=False)\n        self.phi7_bn = nn.BatchNorm1d(nh)\n        self.phi7_act = nn.ReLU6()\n        self.phi8_fc = nn.Linear(nh, 1000)\n\n    def forward(self, x, withStats=False):\n        x = self.phi1_conv(x)\n        x = self.phi1_mp(x)\n        x = self.phi1_bn(x)\n        x = self.phi1_act(x)\n        x = self.phi2_conv(x)\n        x = self.phi2_mp(x)\n        x = self.phi2_bn(x)\n        x = self.phi2_act(x)\n        x = self.phi3_conv(x)\n        x = self.phi3_bn(x)\n        x = self.phi3_act(x)\n        x = self.phi4_conv(x)\n        x = self.phi4_bn(x)\n        x = self.phi4_act(x)\n        x = self.phi5_conv(x)\n        x = self.phi5_mp(x)\n        x = self.phi5_bn(x)\n        x = self.phi5_act(x)\n        x = x.view(-1, torch.Tensor(list(x.size()[-3:])).to(torch.int32).prod().item())\n        x = self.phi6_fc(x)\n        x = self.phi6_bn(x)\n        x = self.phi6_act(x)\n        x = self.phi7_fc(x)\n        x = self.phi7_bn(x)\n        x = self.phi7_act(x)\n        x = self.phi8_fc(x)\n        # no batch norm after the final classifier layer (phi8_bn was never defined)\n        if withStats:\n            stats = []\n            stats.append(('phi1_conv_w', self.phi1_conv.weight.data))\n            stats.append(('phi2_conv_w', self.phi2_conv.weight.data))\n            stats.append(('phi3_conv_w', self.phi3_conv.weight.data))\n            stats.append(('phi4_conv_w', self.phi4_conv.weight.data))\n            stats.append(('phi5_conv_w', self.phi5_conv.weight.data))\n            stats.append(('phi6_fc_w', self.phi6_fc.weight.data))\n            stats.append(('phi7_fc_w', self.phi7_fc.weight.data))\n            stats.append(('phi8_fc_w', self.phi8_fc.weight.data))\n            return stats, x\n        return x\n\n    def forward_with_tensor_stats(self, x):\n        stats, x = self.forward(x, withStats=True)\n        return stats, x\n","sub_path":"quantlab/ImageNet/AlexNet/alexnetbaseline.py","file_name":"alexnetbaseline.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"485191343","text":"# sort() method = used with lists\n# sorted() function = used with iterables\n\n# students = [\"Squidward\", \"Sandy\", \"Patrick\", \"Spongebob\", \"Mr. Crabs\"]\n# sorted_students = sorted(students, reverse=True) #list = func(tuple)\n# students.sort(reverse=True)\n#\n# for i in students:\n#     print(i)\n\n\nstudents = [(\"Squidward\", \"F\", 50),\n            (\"Sandy\", \"A\", 33),\n            (\"Patrick\", \"D\", 20),\n            (\"Spongebob\", \"F\", 10),\n            (\"Mr. 
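The flatten before phi6_fc above can be written with the usual view idiom; a dummy-tensor check that the two spellings agree:

import torch

x = torch.randn(2, 256, 6, 6)
flat_a = x.view(-1, torch.Tensor(list(x.size()[-3:])).to(torch.int32).prod().item())
flat_b = x.view(x.size(0), -1)  # conventional spelling
assert flat_a.shape == flat_b.shape == (2, 256 * 6 * 6)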
Crabs\", \"B\", 22)]\n\n# students.sort() #by first collumn\n\ngrade = lambda grades:grades[1]\n\nage = lambda ages:ages[2]\nstudents.sort(key=age)\n\nfor i in students:\n print(i)\n\nprint(\"_____________tuple sort now\")\nstudentstuple = ((\"Squidward\", \"F\", 50),\n (\"Sandy\", \"A\", 33),\n (\"Patrick\", \"D\", 20),\n (\"Spongebob\", \"F\", 10),\n (\"Mr. Crabs\", \"B\", 22))\n\nage = lambda stuage:stuage[2]\nsorted_tuplestudents = sorted(students, key=age)\n\nfor i in students:\n print(i)","sub_path":"sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"473752927","text":"from game_func import *\n\nprint(\"Bataille navale de Pierre-Louis Leroy et Louis Girones\")\nprint(\"------------------------------------------------------\")\nprint(\"Vous disposez de 10 missiles pour detruire tous les bateaux adverses.\")\nprint(\"Si vous touchez ou coulez un bateau vous recuperez un missile.\")\n#boucle de jeu\njouer=\"1\"\nwhile(jouer==\"1\"):\n \n #____Nombre de tir \n shoot=10\n\n #____Initialisation des variable de statistique\n tour=0\n plouf=0\n touche=0\n coule=0\n \n #____Creation des grilles\n grilleOrdi = grilleJeu(10, \"0\")\n grilleJoueur = grilleJeu(10, \"*\")\n\n #____Placement des bateaux\n #33 case occupee sur 100 = une chance sur 3\n\n #2 bateau 4 case\n ajoutBateau(\"P\",grilleOrdi)\n ajoutBateau(\"P\",grilleOrdi)\n #3 batau 3 case\n ajoutBateau(\"C\",grilleOrdi)\n ajoutBateau(\"C\",grilleOrdi)\n ajoutBateau(\"C\",grilleOrdi)\n #5 bateau 2 case\n ajoutBateau(\"T\",grilleOrdi)\n ajoutBateau(\"T\",grilleOrdi)\n ajoutBateau(\"T\",grilleOrdi)\n ajoutBateau(\"T\",grilleOrdi)\n ajoutBateau(\"T\",grilleOrdi)\n #6 bateau 1 case\n ajoutBateau(\"S\",grilleOrdi)\n ajoutBateau(\"S\",grilleOrdi)\n ajoutBateau(\"S\",grilleOrdi)\n ajoutBateau(\"S\",grilleOrdi)\n ajoutBateau(\"S\",grilleOrdi)\n ajoutBateau(\"S\",grilleOrdi)\n \n\n \n ###____Debut de la partie\n print(\"\\n\")\n while(shoot>0 and tousMort(grilleOrdi)==False):\n affichageGrille(grilleJoueur)\n #___Affichage des info sur la partie\n print(\"Missile restant: \"+str(shoot)+\"\\n\")\n print(\"Entrez vos coordonnees au format ligne colonne\\n\\nEx:A0 pour la première ligne de la colonne A\\n\")\n print(\"Entrez 00 pour quittez le jeu\\n\")\n coordonnees = input(\"Coordonnees?\")\n #quitter la partie\n if(coordonnees==\"00\"):\n jouer=\"2\"\n shoot=0\n viderEcran()\n print(\"Bombardement sur la case:\"+coordonnees+\"\\n\")\n #si le tir est valide\n #dans la grille ET sur une case vierge SINON on recup le missile et la variable tour n'est pas incrémentée\n if(verifCoordTir(coordonnees, grilleJoueur)):\n #si sa touche on gagne un tir gratuit\n if(tir(coordonnees, grilleOrdi,grilleJoueur)):\n shoot+=1\n touche+=1\n #si sa coule\n if(toucheCoule(coordonnees,grilleOrdi)):\n coule+=1\n else:\n plouf+=1\n else:\n shoot+=1\n tour -=1\n\n #Fin du tour \n tour +=1\n shoot-=1\n\n\n #fin de la partie\n viderEcran()\n affichageGrille(grilleJoueur)\n print(\"\\n\\nStatistiques de la partie\\n\")\n if(shoot==0):\n print(\"Perdu en \"+str(tour)+\" tours \\nTir dans l'eau: \"+str(plouf)+\" \\nTir touché:\"+str(touche)+\"\\nNombre de bateau coulé:\"+str(coule)+\"\\n\\n\")\n else:\n print(\"Gagne en \"+str(tour)+\" tours \\nTir:\\n\\nA l'eau: \"+str(plouf)+\" \\nTouché:\"+str(touche)+\"\\nCoulé:\"+str(coule)+\"\\n\\n\")\n jouer=input(\"1 pour rejouer ou 0 pour quitter\") 
\n\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"300541310","text":"# part 1 \nnames = ['MUMBAI', 'PUNE', 'AHMEDABAD']\nlower_names = [i.islower() for i in names]\nprint(lower_names)\n\nnos = [10, 20, 30, 40]\n\ndouble_nos = [i*i for i in nos]\nprint(double_nos)\n\n# part 2 \nmylist = [10,20,30,40]\nnew_list = [ i for i in mylist if i>20]\nprint(new_list)\n\nnew_list = [ i if i>20 else 0 for i in mylist]\nprint(new_list)\n\n\n# part 3\nfrom random import randint\nrandom_list = [ randint(1,5) for i in range(10) ]\n\nnumber_list = [[1,2,3], [4,5,6], [7,8,9]]\nnew_list = [value for sublist in number_list for value in sublist]\nprint(new_list)","sub_path":"Session02/list_comp.py","file_name":"list_comp.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"360494903","text":"\"\"\"2.6 Faça um Programa que leia três números e mostre o maior deles.\"\"\"\n\nn1 = int(input('Primeiro número: '))\nn2 = int(input('Segundo número: '))\nn3 = int(input('Terceiro número: '))\n\nif n1 > n2 > n3 or n1 > n3 > n2:\n maior = n1\nelif n2 > n1 > n3 or n2 > n3 > n1:\n maior = n2\nelif n3 > n1 > n2 or n3 > n2 > n1:\n maior = n3\n\nprint(f'O maior número é {maior}.')","sub_path":"python/python-brasil/02-estruturas-decisao/ex06_maior3.py","file_name":"ex06_maior3.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"23172413","text":"from argparse import ArgumentParser\nfrom glob import glob\nimport os\nfrom data_utils import *\nfrom pixel_flipping_salience import *\nimport csv\nfrom sklearn.metrics import log_loss\nimport torch.optim as optim\nfrom torch.utils import data\nimport torch.nn.functional as f\nfrom keras.preprocessing.image import ImageDataGenerator\nimport torch\n\n############################### Arguments #####################################\nparser = ArgumentParser()\n\nparser.add_argument(\n '--train_dir',\n type=str,\n default='Train',\n help='Where to look for training images.')\nparser.add_argument(\n '--validation_dir',\n type=str,\n default='Validation',\n help='Where to look for validation input images.')\nparser.add_argument(\n '--model_location',\n type=str,\n default=None,\n help='The directory where the saved model is located if testing.')\nparser.add_argument(\n '--mode',\n type=str,\n choices=['train', 'test'],\n default='train',\n help='Whether to finetune a model or test a trained one.')\nparser.add_argument(\n '--model',\n type=str,\n choices=['vgg11_bn',\n 'vgg16',\n 'vgg16_bn',\n 'vgg19_bn',\n 'vgg19',\n 'resnet18',\n 'resnet34',\n 'resnet101',\n 'resnet50',\n 'resnet152',\n 'squeezenet1_0',\n 'squeezenet1_1',\n 'densenet121',\n 'densenet169',\n 'densenet201',\n 'fbresnet152',\n 'resnext101_32x4d',\n 'resnext101_64x4d',\n 'inceptionv4',\n 'inceptionresnetv2',\n 'nasnetamobile',\n 'senet154',\n 'se_resnet50',\n 'se_resnet101',\n 'se_resnet152',\n 'se_resnext50_32x4d',\n 'se_resnext101_32x4d',\n 'cafferesnet101',\n 'polynet',\n 'pnasnet5large'],\n default='resnet50',\n help='Which model to train. Default is ResNet-50.')\nparser.add_argument(\n '--image_size',\n type=str,\n default='224,224',\n help='The size to make each input square. 
Default is 224x224.')\nparser.add_argument(\n    '--batch_size',\n    type=int,\n    default=40,\n    help='Batch size to use for training. Default 40.')\nparser.add_argument(\n    '--class_weights',\n    type=str,\n    default=None,\n    help='How much more to weight each class in the loss function.')\nparser.add_argument(\n    '--class_weight_indices',\n    type=str,\n    default=None,\n    help='The corresponding class indices for the class weights.')\nparser.add_argument(\n    '--n_epochs',\n    type=int,\n    default=250,\n    help='Number of training epochs. Default 250.')\nparser.add_argument(\n    '--fliplr',\n    type=bool,\n    default=False,\n    help='Augment the images by flipping some of them across the y-axis randomly. Default False.')\nparser.add_argument(\n    '--flipud',\n    type=bool,\n    default=False,\n    help='Augment images by flipping some of them across the x-axis. Default False.')\nparser.add_argument(\n    '--learning_rate',\n    type=float,\n    default=.0001,\n    help='Learning rate for training. Default is 1e-4.')\nparser.add_argument(\n    '--lr_decay',\n    type=float,\n    default=0.2,\n    help='Learning rate decay parameter.')\nparser.add_argument(\n    '--save_freq',\n    type=int,\n    default=200,\n    help=\"How often in training iterations to view model's progress.\")\nparser.add_argument(\n    '--rotation_range',\n    type=int,\n    default=0,\n    help='Rotation angle in degrees when augmenting.')\nparser.add_argument(\n    '--brightness_shift_range',\n    type=str,\n    default=None,\n    help='Amount to shift the brightness. E.g. 10,40')\n###############################################################################\n\nargs = parser.parse_args()\nmodel_name = args.model\nim_size = [int(n) for n in args.image_size.split(',')]\nbatch_size = args.batch_size\nargs.train_dir = os.path.abspath(args.train_dir)\nargs.validation_dir = os.path.abspath(args.validation_dir)\n\nif args.brightness_shift_range is not None:\n    bright_range = [int(num) for num in args.brightness_shift_range.split(',')]\nelse:\n    bright_range = None\n\nsave_name = 'learning_rate_{}_model_{}_batchsize_{}' \\\n    .format(args.learning_rate,\n            args.model,\n            batch_size)\n\n# make a new folder for the results of this model if not one already\nif save_name not in os.listdir():\n    os.mkdir(save_name)\n\n\ndef normalize(input_image):\n    # shift so the minimum is zero, then rescale to the model's input range\n    input_image -= np.amin(input_image)\n\n    input_image /= (np.amax(input_image) + 1e-10)\n\n    if in_range[-1] == 255:\n        input_image *= 255.\n\n    for channel in range(3):\n        input_image[channel, ...] -= mu[channel]\n        input_image[channel, ...] 
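train_model.py parses --image_size by hand and declares the flip flags as type=bool (a known argparse footgun: bool('False') is True); argparse can do both jobs itself with a type callable and action='store_true'. A sketch:

from argparse import ArgumentParser

def int_pair(s):
    return [int(n) for n in s.split(',')]

p = ArgumentParser()
p.add_argument('--image_size', type=int_pair, default=[224, 224])
p.add_argument('--fliplr', action='store_true')

args = p.parse_args(['--image_size', '224,224', '--fliplr'])
print(args.image_size, args.fliplr)  # [224, 224] True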
/= sigma[channel]\n\n    return input_image\n\n\ndef create_img_gen(flipud, fliplr, ht_shift, wd_shift, rot_range, bright_shift):\n    # Keras horizontal_flip mirrors left-right (fliplr); vertical_flip mirrors up-down (flipud)\n    return ImageDataGenerator(horizontal_flip=fliplr,\n                              vertical_flip=flipud,\n                              height_shift_range=ht_shift,\n                              width_shift_range=wd_shift,\n                              fill_mode='reflect',\n                              validation_split=0.0,\n                              rotation_range=rot_range,\n                              brightness_range=bright_shift,\n                              data_format='channels_first',\n                              preprocessing_function=normalize)\n\n\nif args.mode == 'train':\n    # instantiate the training image generator and iterator\n    train_gen = create_img_gen(args.flipud,\n                               args.fliplr,\n                               .15,\n                               .15,\n                               args.rotation_range,\n                               bright_range)\n    train_iterator = train_gen.flow_from_directory(\n        args.train_dir,\n        target_size=(im_size[0], im_size[1]),\n        class_mode='categorical',\n        batch_size=batch_size)\n\n    # instantiate the validation image generator and iterator\n    val_gen = create_img_gen(False, False, None, None, 0, None)\n    val_iterator = val_gen.flow_from_directory(\n        args.validation_dir,\n        target_size=(im_size[0], im_size[1]),\n        class_mode='categorical',\n        batch_size=batch_size,\n        shuffle=False)\n\n    n_classes = train_iterator.num_classes\n\n\n    model = load_pretrained_model(model_name, im_size)\n    model = freeze_weights(model, model_name)\n    model = add_trainable_layers(model, model_name, n_classes)\n\n    mu, sigma, in_range = model.mean, model.std, model.input_range\n\n    #if torch.cuda.device_count() > 1:\n    #    model = torch.nn.DataParallel(model)\n    class_weights = np.ones([n_classes, ])\n    if args.class_weights is not None:\n        class_penalties = [float(num) for num in args.class_weights.split(',')]\n        class_indices = [int(num) for num in args.class_weight_indices.split(',')]\n        assert(len(class_penalties) == len(class_indices)), 'num. penalties and indices not equal'\n\n        for weight_indx in range(len(class_indices)):\n            position = class_indices[weight_indx]\n            class_weights[position] = class_penalties[weight_indx]\n\n\n    loss_func = nn.CrossEntropyLoss(weight=torch.from_numpy(class_weights).cuda().float()) # define loss function\n    opt = optim.Adam(model.parameters(), lr=args.learning_rate) # define optimizer\n\n\n    for epoch in range(args.n_epochs):\n        model_trainer(model,\n                      train_iterator,\n                      loss_func,\n                      opt,\n                      args.save_freq,\n                      val_iterator,\n                      save_name,\n                      epoch)\n\nelif args.mode == 'test':\n    args.validation_dir = os.path.abspath(args.validation_dir)\n    n_classes = len(os.listdir(args.validation_dir))\n\n    args.model_location = os.path.abspath(args.model_location)\n    weights_files = glob(os.path.join(args.model_location, '*.pt'))\n    model = load_pretrained_model(model_name, im_size)\n    model = add_trainable_layers(model, model_name, n_classes)\n    mu, sigma, in_range = model.mean, model.std, model.input_range\n    model.load_state_dict(torch.load(weights_files[-1]))\n    model = model.cuda()\n    model.eval()\n\n    pixy = PixelFlip(model, args.validation_dir, args.model_location)\n    pixy.flip()\n","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":8634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"471900349","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Post, Category\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n# Create your views here.\n\n\n# blog page (all posts)\ndef blog(request, category_slug=None,):\n    category = None\n    categories = Category.objects.all()\n    post = Post.objects.filter(post_available=True)\n    current_page = Paginator(post, 5)\n    page = 
request.GET.get('page')\n\n    if category_slug:\n        category = get_object_or_404(Category, category_slug=category_slug)\n        post = Post.objects.filter(post_category=category)\n        current_page = Paginator(post, 5)\n        page = request.GET.get('page')\n\n    try:\n        posts = current_page.page(page)\n    except PageNotAnInteger:\n        # If page is not an integer, deliver first page.\n        posts = current_page.page(1)\n    except EmptyPage:\n        # If page is out of range (e.g. 9999), deliver last page of results.\n        posts = current_page.page(current_page.num_pages)\n\n    return render(request, 'blog/blog.html', {\n        'categories': categories,\n        'category': category,\n        'posts': posts\n    })\n\n\n# post detail page\ndef post_detail(request, id):\n    post = get_object_or_404(Post, id=id, post_available=True)\n    return render(request, 'blog/post_detail.html', {'post': post})","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"627374042","text":"import os\nimport glob\nimport xml.etree.ElementTree as ET\nimport re\n\n# Parser to Extract Section Titles and Sections from an XML file\n# Output is temporarily placed in an output file, later placed into a database\n# XML files must be in the same place as this python file\n# XML files taken from GROBID API call\n\n# ASSUME: paper already has file for it and we are currently inside it\n\n\nforbidden_chars_table = str.maketrans('\\/*?:\"<>|', '_________')\n\n\ndef parseSection(XMLroot, device):\n\n    parse_abstract(XMLroot, device)\n    cite_vals = parse_and_find_citation_vals(XMLroot, device)\n\n    return cite_vals\n\n\ndef parse_abstract(root, device):\n\n    abstract = next(root.iter(\"{http://www.tei-c.org/ns/1.0}abstract\"))\n\n    try:\n        abstract = abstract.find(\"{http://www.tei-c.org/ns/1.0}p\").text\n\n    except:\n        abstract = \"No Abstract Extracted\"\n\n    device.sections['Abstract'] = abstract\n    device.abstract = abstract\n\n\"\"\"\nParses through each section to extract its text, and also counts how many times each citation is cited and where.\nOutputs:\ncite_occurrence: for each citation, the number of times it was cited in the text\ncitation_placement: for each citation, the sections where it was cited\ncitations_not_accounted: citations GROBID could not match, as (text, section) tuples\n\"\"\"\n\ndef parse_and_find_citation_vals(root, device):\n\n    body = next(root.iter(\"{http://www.tei-c.org/ns/1.0}body\"))\n\n    cite_occurrence = {}\n    citation_placement = {}\n    citations_not_accounted = []\n    for div in body.iter(\"{http://www.tei-c.org/ns/1.0}div\"):\n        section = div.find(\"{http://www.tei-c.org/ns/1.0}head\")\n\n        sectionNumber = section.get('n')\n        sectionTitle = section.text\n        sectionTitle = sectionTitle.translate(forbidden_chars_table)\n\n        if sectionNumber is not None:\n            section_file = sectionNumber + ' ' + sectionTitle\n        else:\n            section_file = sectionTitle\n\n        paragraphs = []\n\n        for paragraph in div.findall(\"{http://www.tei-c.org/ns/1.0}p\"):\n            text = paragraph.text\n            for ref in paragraph.findall(\"{http://www.tei-c.org/ns/1.0}ref\"): # extract text after the references\n                if ref is not None:\n                    extract_ref_count(ref, cite_occurrence, citation_placement, citations_not_accounted, section_file)\n                    if ref.tail and ref.text is not None:\n                        text = text + ref.text + ref.tail\n                    elif ref.text is not None:\n                        text = text + ref.text\n\n            paragraphs.append(text)\n\n        if section_file in device.sections:\n            count = 1\n            section_file = section_file + '(1)'\n            while section_file in device.sections:\n                count += 1\n                section_file 
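The hard-coded {http://www.tei-c.org/ns/1.0} prefixes in the parser above can be replaced by ElementTree's namespace-map form of find/findall; a sketch on a tiny in-memory TEI fragment:

import xml.etree.ElementTree as ET

TEI = {'tei': 'http://www.tei-c.org/ns/1.0'}
doc = ET.fromstring(
    '<TEI xmlns="http://www.tei-c.org/ns/1.0">'
    '<abstract><p>Some abstract text.</p></abstract></TEI>')

abstract = doc.find('tei:abstract', TEI)
print(abstract.find('tei:p', TEI).text)  # Some abstract text.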
= section_file[:-3] + '(' + str(count) + ')'\n\n device.sections[section_file] = paragraphs\n\n return cite_occurrence, citation_placement, citations_not_accounted\n\n\ndef extract_ref_count(ref, cite_occurrence, citation_placement, citations_not_accounted, section_file):\n attributes = ref.attrib\n if ref.text is not None:\n if 'type' in attributes:\n if attributes['type'] == 'bibr':\n \"\"\"\n if ref_regex has only one item, then the text is either [/d/d?] or (/d/d?)\n if not, then the text in the form (Author Name, \\d\\d\\d\\d)\n \"\"\"\n if 'target' in attributes:\n bibr_number = attributes['target']\n if bibr_number is not None:\n bibr_number = int(bibr_number[2:])\n ref_regex = re.findall(r'\\d\\d?', ref.text)\n\n if len(ref_regex) == 1:\n ref_number = compare_ref_numbers(bibr_number, int(ref_regex[0]))\n else:\n # if our citation in the form of (Author Name, \\d\\d\\d\\d), assume the bibr number is correct\n ref_number = bibr_number + 1\n add_ref_count(ref_number, cite_occurrence, citation_placement, section_file)\n\n else:\n ref_regex = re.findall(r'\\d\\d?', ref.text)\n if len(ref_regex) == 1:\n # conditional to stop equation references added to ref-count\n if '(' not in ref.text and ')' not in ref.text:\n ref_number = int(ref_regex[0])\n add_ref_count(ref_number, cite_occurrence, citation_placement, section_file)\n else:\n # if GROBID can't match reference, add to citations_not_accounted,\n # citations_not_accounted is a list of tuples with the ref text and the section it was cited in\n citations_not_accounted.append((ref.text, section_file))\n\n\ndef compare_ref_numbers(bibr_number, text_number):\n # Correct format: bibr_number + 1 == text_number\n if (text_number - bibr_number) == 1:\n return bibr_number + 1\n else:\n # if not in the right format, default to the number in the text\n return text_number\n\n\ndef add_ref_count(ref_number, cite_occurrence, citation_locations, section_file):\n if ref_number not in cite_occurrence:\n cite_occurrence[ref_number] = 1;\n citation_locations[ref_number] = [section_file]\n print(str(ref_number) + \" cited: one time\")\n\n else:\n cite_occurrence[ref_number] += 1\n if section_file not in citation_locations[ref_number]:\n citation_locations[ref_number].append(section_file)\n print(str(ref_number) + \" cited: %s times\" % str(cite_occurrence[ref_number]))\n\n\n\n\n\n\n\n\n","sub_path":"src/SectionParser.py","file_name":"SectionParser.py","file_ext":"py","file_size_in_byte":5702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"480049486","text":"from subprocess import call\nfrom google.cloud import storage\nimport base64\n\ndef render(request):\n\n envelope = request.get_json()\n if not envelope:\n msg = 'no Pub/Sub message received'\n print(f'error: {msg}')\n return f'Bad Request: {msg}', 400\n\n if not isinstance(envelope, dict) or 'message' not in envelope:\n msg = 'invalid Pub/Sub message format'\n print(f'error: {msg}')\n return f'Bad Request: {msg}', 400\n\n text = 'HELLO'\n pubsub_message = envelope['message']\n if isinstance(pubsub_message, dict) and 'data' in pubsub_message:\n text = base64.b64decode(pubsub_message['data']).decode('utf-8').strip()\n\n location = '/tmp/renders/'\n suffix = 'tempfile'\n filename = location + suffix + '0001.png'\n blender_file = \"models/outrun.blend\"\n\n # This script changes the text, it is run inside our 3D software. 
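The add_ref_count bookkeeping above (a count per reference plus the set of sections it appears in) maps naturally onto collections.Counter and defaultdict; a standalone sketch with made-up data:

from collections import Counter, defaultdict

cite_occurrence = Counter()
citation_placement = defaultdict(set)

for ref_number, section in [(3, 'Intro'), (3, 'Methods'), (7, 'Intro'), (3, 'Intro')]:
    cite_occurrence[ref_number] += 1
    citation_placement[ref_number].add(section)

print(cite_occurrence[3], sorted(citation_placement[3]))  # 3 ['Intro', 'Methods']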
\n blender_expression = \"import bpy; bpy.data.objects['Text'].data.body = '%s'\" % text\n # Render 3D image\n call('blender -b %s --python-expr \"%s\" -o %s%s -f 1' % (blender_file, blender_expression, location, suffix), shell=True)\n\n # upload file to GCS\n client = storage.Client()\n bucket = client.get_bucket('')\n blobname = text + '.png'\n blob = bucket.blob(blobname)\n blob.upload_from_filename(filename)\n\n # returns a public URL\n return blob.public_url\n","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"288068093","text":"'''\nTime complexity: O(n log n), dominated by the initial sort.\nFirst sort the array. Start with index 0 and (0+k-1), i.e. the first and last elements of the subarray with k elements (our subarray).\nWe then check the difference of these two elements, as they are the max and min of this subarray. Continue till index (n-k).\n'''\n\ndef maxMin(k, arr, n):\n arr.sort()\n i = 0\n ans = arr[k-1] - arr[0]\n for i in range(n - k + 1):\n temp = arr[i + k - 1] - arr[i]\n if temp < ans:\n ans = temp\n return ans\n","sub_path":"Greedy Algorithms/Max Min/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"273000643","text":"from flask import render_template, redirect, g, request\nfrom app import app, models\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\n@app.route(\"/index\", methods=['GET', 'POST'])\ndef index():\n data = models.getData()\n return render_template(\"index.html\",\n urges={\"table_name\": \"urges_impulses\", \"data\": data[\"urges\"]},\n emotions={\"table_name\": \"emotions\", \"data\": data[\"emotions\"]},\n distortions={\n \"table_name\": \"cognitive_distortions\",\n \"data\": data[\"distortions\"]\n },\n behaviors={\n \"table_name\": \"behaviors_actions\",\n \"data\": data[\"behaviors\"]\n }\n )\n\n\n@app.route(\"/update\", methods=['GET', 'POST'])\ndef update():\n table = request.form.get(\"table\")\n name = request.form.get(\"name\")\n day_of_week = request.form.get(\"day_of_week\")\n value = request.form.get(\"value\")\n row_number = request.form.get(\"row_number\")\n models.updateData(table, name, day_of_week, value, row_number)\n # print request.form\n if value is not None:\n return value\n else:\n return name\n\n\n@app.route(\"/clear\", methods=['GET', 'POST'])\ndef clear():\n models.clearData()\n return redirect(\"/\")\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"20920953","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 24 10:19:52 2019\r\n\r\n@author: giles\r\n\"\"\"\r\n\r\n# File handling in Python\r\n# Python can open, close, read from and write to files\r\n\r\n#f = open('kipling.txt','w')\r\n#\r\n#print(type(f))\r\n#\r\n#f.write('If you can keep your head while all about you \\nare losing theirs\\\r\n#and blaming it on you,\\n')\r\n#\r\n#f.write('If you can trust yourself when all men doubt you,\\n\\\r\n#But make allowance for their doubting too;\\n')\r\n#\r\n#f.write('If you can wait and not be tired by waiting,\\n\\\r\n#Or being lied about, don\\'t deal in lies,\\n')\r\n#\r\n#f.write('Or being hated, don\\'t give way to hating,\\n\\\r\n#And yet don\\'t look too good, nor talk too wise:\\n')\r\n#\r\n#f.close()\r\n#\r\n#f =
open('kipling.txt','r')\r\n#\r\n#print(type(f))\r\n#\r\n#print(f.read())\r\n#f.close()\r\n#\r\n#f = open('kipling.txt','r')\r\n#\r\n#print(f.readline())\r\n#f.close()\r\n#print()\r\n#\r\n#f = open('kipling.txt','r')\r\n#\r\n#print(type(f))\r\n#\r\n#print(f.readlines())\r\n#f.close()\r\n\r\n#f = open('kipling.txt','r')\r\n#\r\n#print(type(f))\r\n#\r\n#content = f.readlines()\r\n#f.close()\r\n\r\n\r\n#\r\n#f = open('kipling.txt','a')\r\n#f.write('If you can dream - and not make dreams your master;\\n\\\r\n#If you can think - and not make thoughts your aim;\\n')\r\n#f.close()\r\n#print()\r\n#f = open('kipling.txt','r')\r\n#print(f.read())\r\n#f.close()\r\n#print()\r\n#with open('kipling.txt','r') as f:\r\n# for line in f.readlines():\r\n# print(line,end='')\r\n\r\n\r\n# Functions\r\n\r\n#print('Hello, world!')\r\n\r\n#def hello():\r\n# print('Hello, world!')\r\n#\r\n#hello()\r\n##\r\n#for i in range(5):\r\n# hello()\r\n\r\n\r\n#def hi(name):\r\n# print(f'Hello, {name}!')\r\n## \r\n#hi('Giles') \r\n#hi('Anthony')\r\n\r\n\r\n#hi()\r\n\r\n#def hi_2(name='Giles'):\r\n# print(f'Hello, {name}!')\r\n##\r\n#hi_2()\r\n\r\n#n=20\r\n#a = 0\r\n#b = 1\r\n#for i in range(n):\r\n# a,b = b,a+b\r\n#print(a) \r\n# \r\n# \r\n# \r\ndef fib(n):\r\n ''' Calculates and returns the nth fibonacci number'''\r\n a = 0\r\n b = 1\r\n for i in range(n):\r\n a,b = b,a+b\r\n return a\r\n# \r\n## \r\n#fib_num = fib(20)\r\n#print(fib_num)\r\n## \r\n#for i in range(20):\r\n# print(fib(i))\r\n \r\n \r\n# Docstring\r\n \r\n#def calc_mean(first,*remainder):\r\n# '''\r\n# This calculates the mean of numbers.\r\n# '''\r\n# mean = (first + sum(remainder))/ (1 + len(remainder))\r\n# print(type(remainder))\r\n# return mean\r\n# \r\n#print(calc_mean(23,43,56,76,45,34,65,78,975,3456,54))\r\n# \r\n#\r\n# \r\n \r\n# Recursion\r\n\r\ndef fib_2(n):\r\n if n == 0:\r\n return 0\r\n elif n == 1:\r\n return 1\r\n else:\r\n return fib_2(n-1) + fib_2(n-2)\r\n# \r\n \r\n#x = fib_2(20)\r\n#print(x) \r\n#y = fib(1000)\r\n#print(y)\r\n## \r\n#x = fib_2(37) \r\n#print(x)\r\n\r\n\r\n \r\nimport timeit\r\n\r\n\r\nt1 = timeit.Timer(\"fib(36)\",\"from greetings import fib\")\r\nprint(t1.timeit(5))\r\n \r\nt2 = timeit.Timer(\"fib_2(36)\",\"from greetings import fib_2\")\r\nprint(t2.timeit(5))\r\n\r\n\r\n","sub_path":"PYTHON NOTEBOOKS/9.0.1_Files_&_Functions.py","file_name":"9.0.1_Files_&_Functions.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"357596713","text":"#!/usr/bin/env python3\n\n\nimport pandas as pd\n\n\nclass Candle():\n\n def Build(self, csvFile, tf):\n # csv format: 'EURUSD-2016-01.csv'\n # tf format: '15Min', '1H'\n data_frame = pd.read_csv(csvFile, names=['Symbol', 'Date_Time', 'Bid', 'Ask'], index_col=1, parse_dates=True)\n data_frame.head()\n data_ask = data_frame['Ask'].resample(tf).ohlc()\n data_bid = data_frame['Bid'].resample(tf).ohlc()\n data_ask.head()\n data_bid.head()\n data_ask_bid = pd.concat([data_ask, data_bid], axis=1, keys=['Ask', 'Bid'])\n return data_ask_bid\n","sub_path":"candle.py","file_name":"candle.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"475442210","text":"#!/usr/bin/env python\n\nimport gtk, webkit\nfrom time import sleep\n\ndef go_but(widget):\n add = addressbar.get_text()\n if add.startswith(\"http://\"):\n web.open(add)\n else:\n add=\"http://\" + add\n web.open(add)\n sleep(1)\n #addressbar.set_text(add)\n 
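# show the URI the frame actually loaded (follows any redirects), rather than the text that was typed\n 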
addressbar.set_text(web.get_main_frame().get_uri())\n\ndef Jeedom_but(widget):\n web.open(HomePage)\n addressbar.set_text(web.get_main_frame().get_uri())\n\nHomePage = 'http://jeedom.thetech.zone/index.php?v=m'\n\nwin = gtk.Window()\nwin.set_size_request(800,480)\nwin.connect('destroy',lambda w: gtk.main_quit())\n\n\nbox1 = gtk.VBox()\nwin.add(box1)\n\nbox2 = gtk.HBox()\nbox1.pack_start(box2, False)\n\naddressbar = gtk.Entry()\nbox2.pack_start(addressbar)\n\n\ngobutton = gtk.Button(\"Go\")\nbox2.pack_start(gobutton)\ngobutton.connect('clicked', go_but)\n\nJeedomButton = gtk.Button(\"Jeedom\")\nbox2.pack_start(JeedomButton)\nJeedomButton.connect('clicked', Jeedom_but)\n\n\n\nscroller = gtk.ScrolledWindow()\nbox1.pack_start(scroller)\n\nweb = webkit.WebView()\nscroller.add(web)\nweb.open(HomePage)\n\nwin.show_all()\n#print(web.get_main_frame().get_uri())\ngtk.main()\n","sub_path":"PyControlGui.py","file_name":"PyControlGui.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"253311476","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/sutekh/base/gui/PkgIcon.py\n# Compiled at: 2019-12-11 16:37:48\n\"\"\"Provide a base class for handling the application icon\"\"\"\nfrom pkg_resources import resource_stream\nimport gtk\n\nclass PkgIcon(object):\n \"\"\"Load a gtk Pixbuf object from a package resource file.\"\"\"\n\n def __init__(self, sPkg, sResource):\n oLoader = gtk.gdk.PixbufLoader()\n oFile = resource_stream(sPkg, sResource)\n oLoader.write(oFile.read())\n oFile.close()\n oLoader.close()\n self._oIcon = oLoader.get_pixbuf()\n\n def get_pixbuf(self):\n \"\"\"Return the actual icon\"\"\"\n return self._oIcon","sub_path":"pycfiles/Sutekh-1.0.0-py2.7/PkgIcon.py","file_name":"PkgIcon.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"560367884","text":"# move_test_images.py\n# Builds the sketches and photos for the test split (the next file builds the tfrecords)\n# The related operations from the document have all been turned into corresponding functions.\n\n# Only file moves are involved, so the os and shutil modules are all that is needed.\nimport os\nimport shutil\n# Helper: since the 125 * 50 = 6250 test sketches are already\n# fixed, only the corresponding photos need to be moved.\ndef GetClaases(photo_dir, need_dict=False):\n ClassesList = os.listdir(photo_dir)\n if not need_dict:\n return ClassesList\n else:\n ClassesLabel = list(range(len(ClassesList)))\n return ClassesList, ClassesLabel\n\n\n# Move part of the photos that previously all went to train into test.\n# In the end the photos are split into 2 parts: one set for (train+valid), one set for (test).\ndef MoveTestPhotos(photo_src, photo_dst, num_in_a_class=5):\n classlist = GetClaases(photo_src)\n # Check recursively whether the directory exists; create it if it does not.\n if not os.path.exists(photo_dst):\n os.mkdir(photo_dst)\n\n for classname in classlist:\n # If the matching class directory does not exist\n if not os.path.exists(photo_dst+classname):\n os.mkdir(photo_dst+classname)\n\n # All photos in one class.\n photolist = os.listdir(photo_src+classname)\n # Pick the first 10 to move.\n photolist_totest = photolist[0:num_in_a_class]\n # First try writing this as a \"comprehension\"; switch to a for loop if it fails.\n # The paths are built with a two-step concatenation. The style is a bit rough.\n [shutil.move(photo_src+classname+'/'+photoname_totest\n , photo_dst+classname+'/'+photoname_totest) for photoname_totest in photolist_totest]\n\n# Same as the function above, only some names are changed.\n# In the end the sketches are split into 3 parts: one set each for (train), (valid) and (test).\ndef MoveValidSketches(sketch_src, sketch_dst, num_in_a_class=5):\n classlist = GetClaases(sketch_src)\n # Check recursively whether the directory exists; create it if it does not.\n if not os.path.exists(sketch_dst):\n os.mkdir(sketch_dst)\n\n for classname in classlist:\n # If the matching class directory does not exist\n if not os.path.exists(sketch_dst+classname):\n os.mkdir(sketch_dst+classname)\n\n # All sketches in one class.\n sketchlist = os.listdir(sketch_src+classname)\n # Pick the first 10 to move.\n sketchlist_tovalid = sketchlist[0:num_in_a_class]\n # First try writing this as a \"comprehension\"; switch to a for loop if it fails.\n # The paths are built with a two-step concatenation. The style is a bit rough.\n [shutil.move(sketch_src+classname+'/'+sketchname_tovalid\n , sketch_dst+classname+'/'+sketchname_tovalid) \n for sketchname_tovalid in sketchlist_tovalid]\n\n\n# Run only when this file is executed directly.\nif __name__ == \"__main__\":\n # Reminder: moving is an \"irreversible\" operation, be careful!!!\n # To move things back, swap the two path arguments, or:\n # if need_reverse:\n # photo_src, photo_dst = photo_dst, photo_src\n # and that does the job~\n # The next run of this command should be written as: num_in_a_class_add=5\n # Mind how the third argument is used!!!\n # A backup was left here; if changes are needed, it has to be deleted first...\n \n # Uncomment these only when they are actually needed.\n # MoveTestPhotos('../network/dataset_raw/photo/train/', '../network/dataset_raw/photo/test/', 10)\n # MoveValidSketches('../network/dataset_raw/sketch/train/', '../network/dataset_raw/sketch/valid/', 10)\n # pass\n MoveTestPhotos(\n '../network/dataset_raw/edgemap/valid/', \n '../network/dataset_raw/edgemap/test/', \n 10,\n )\n\n# Final split:\n# sketch:\n# train: 67981 valid: 1250=125*10(from previous train) test: 6250=125*50\n# photo:\n# train(+valid): 11250=125*90 test: 1250=125*10\n# train/test: \n# sketch = 67981/6250 = 10.87696 \n# photo = 11250/1250 = 9.00000\n# Roughly equal, or...? I want each class to be split as evenly as possible.\n","sub_path":"code/make_and_load_data/move_test_images.py","file_name":"move_test_images.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"94496451","text":"# $Id: TwistedRenderer.py,v 1.7.2.3 2006/12/25 13:05:46 marcusva Exp $\n#\n# Copyright (c) 2005, Benjamin Olsen\n# All rights reserved.\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"A Renderer that plays nicely with a threadedselectreactor from\nTwisted.\n\nMany thanks to Bob Ippolito for his work on threadedselectreactor and\nthe pygamedemo.py he created.\n\nWritten by Ben Olsen \n\"\"\"\n\nfrom pygame import QUIT, event\nfrom pygame import error as PygameError\nfrom pygame import time as PygameTime\nfrom ocempgui.widgets import Renderer\nfrom ocempgui.widgets.Constants import *\n\nclass TwistedRenderer (Renderer):\n \"\"\"TwistedRenderer () -> TwistedRenderer\n\n A Renderer that allows the easy integration with Twisted.\n \n Because Twisted's threadedselectreactor *must* be shut down before\n the main loop shuts down, this Renderer will keep running until\n explicitly told to stop.\n\n Before starting the main loop, this Renderer will check to see if it\n has a Twisted reactor attached to it. This is an attribute set like\n any of the normal Renderer attributes:\n\n self.reactor = reactor\n\n If self.reactor is None (default), this will behave like a normal\n Renderer. If self.reactor has been set, the QUIT signal will call\n reactor.stop(), and then wait for reactor.addSystemEventTrigger to\n call self.stop(). This function will then stop the main loop.\n\n Usage\n -----\n Install the threadedselectreactor instead of the default reactor:\n\n from twisted.internet.threadedselectreactor import install\n install()\n from twisted.internet import reactor\n\n In the main section of your program, where you create the Renderer,\n just set TwistedRenderer's reactor:\n\n re = TwistedRenderer()\n re.reactor = reactor\n\n Everything else is handled internally by TwistedRenderer.\n\n Attributes:\n reactor - The twisted reactor attached to the TwistedRenderer.\n \"\"\"\n def __init__ (self):\n Renderer.__init__ (self)\n self._reactor = None\n self._running = False\n \n def start (self):\n \"\"\"T.start () -> None\n \n Overrides default start() to use self._running. If a reactor is\n attached, interleave self.waker\n \"\"\"\n self._running = 1\n if self._reactor != None:\n self._reactor.interleave (self.waker)\n self._loop()\n \n def stop (self):\n \"\"\"T.stop () -> None\n\n Tells the internal loop to stop running.\n \"\"\"\n self._running = False\n \n def set_reactor (self, reactor):\n \"\"\"T.set_reactor (...) -> None\n\n Sets the internal reactor.\n \"\"\"\n if not hasattr (reactor, 'interleave'):\n raise AttributeError (\"interleave() method not found in %s\" %\n reactor)\n self._reactor = reactor\n self._reactor.addSystemEventTrigger ('after', 'shutdown', self.stop)\n\n def waker (self, func):\n \"\"\"T.waker (...) -> None\n\n Used in threadedselectreactor.interleave.\n \"\"\"\n event.post (event.Event (SIG_TWISTED, data=func))\n\n def distribute_events (self, *events):\n \"\"\"T.distribute_events (...) -> None\n\n Overrides default distribute_events() to check for a reactor. If\n a reactor is found, the QUIT signal will call reactor.stop(). 
If\n there's no reactor attached, a QUIT signal will simply set\n self._running to False.\n \"\"\"\n for event in events:\n if event.type == QUIT:\n if self._reactor != None:\n self._reactor.stop ()\n else:\n self._running = False\n\n elif event.type == SIG_TWISTED:\n event.data ()\n else:\n Renderer.distribute_events (self, (event))\n return True\n\n def _loop (self):\n \"\"\"T._loop () -> None\n\n Overrides default _loop() so that it will not stop until\n self._running is false.\n \"\"\"\n # Emit the tick event every 10 ms.\n PygameTime.set_timer (SIG_TICK, 10)\n delay = PygameTime.delay\n event_get = event.get\n pump = event.pump\n\n while self._running:\n pump ()\n # Get Events and distribute them.\n events = event_get ()\n\n if not self.distribute_events (*events):\n return # QUIT event\n if self.timer > 0:\n delay (1000 / self.timer)\n\n reactor = property (lambda self: self._reactor,\n lambda self, var: self.set_reactor (var),\n doc = \"The twisted reactor attached to the Renderer.\")\n","sub_path":"ocempgui/widgets/TwistedRenderer.py","file_name":"TwistedRenderer.py","file_ext":"py","file_size_in_byte":5915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"420200649","text":"\"\"\"Neighbor sampling APIs\"\"\"\n\nfrom .._ffi.function import _init_api\nfrom .. import backend as F\nfrom ..base import DGLError, EID\nfrom ..heterograph import DGLHeteroGraph\nfrom .. import ndarray as nd\nfrom .. import utils\nfrom .. import subgraph as subg\nfrom .dataloader import BlockSampler, assign_block_eids\n\n__all__ = [\n 'sample_neighbors',\n 'select_topk',\n 'MultiLayerNeighborSampler']\n\ndef sample_neighbors(g, nodes, fanout, edge_dir='in', prob=None, replace=False):\n \"\"\"Sample neighboring edges of the given nodes and return the induced subgraph.\n\n For each node, a number of inbound (or outbound when ``edge_dir == 'out'``) edges\n will be randomly chosen. The graph returned will then contain all the nodes in the\n original graph, but only the sampled edges.\n\n Node/edge features are not preserved. The original IDs of\n the sampled edges are stored as the `dgl.EID` feature in the returned graph.\n\n Parameters\n ----------\n g : DGLGraph\n The graph\n nodes : tensor or dict\n Node IDs to sample neighbors from.\n\n This argument can take a single ID tensor or a dictionary of node types and ID tensors.\n If a single tensor is given, the graph must only have one type of nodes.\n fanout : int or dict[etype, int]\n The number of edges to be sampled for each node on each edge type.\n\n This argument can take a single int or a dictionary of edge types and ints.\n If a single int is given, DGL will sample this number of edges for each node for\n every edge type.\n\n If -1 is given for a single edge type, all the neighboring edges with that edge\n type will be selected.\n edge_dir : str, optional\n Determines whether to sample inbound or outbound edges.\n\n Can take either ``in`` for inbound edges or ``out`` for outbound edges.\n prob : str, optional\n Feature name used as the (unnormalized) probabilities associated with each\n neighboring edge of a node. The feature must have only one element for each\n edge.\n\n The features must be non-negative floats, and the sum of the features of\n inbound/outbound edges for every node must be positive (though they don't have\n to sum up to one). 
Otherwise, the result will be undefined.\n replace : bool, optional\n If True, sample with replacement.\n\n Returns\n -------\n DGLGraph\n A sampled subgraph containing only the sampled neighboring edges.\n\n Examples\n --------\n Assume that you have the following graph\n\n >>> g = dgl.graph(([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 2, 0]))\n\n And the weights\n\n >>> g.edata['prob'] = torch.FloatTensor([0., 1., 0., 1., 0., 1.])\n\n To sample one inbound edge for node 0 and node 1:\n\n >>> sg = dgl.sampling.sample_neighbors(g, [0, 1], 1)\n >>> sg.edges(order='eid')\n (tensor([1, 0]), tensor([0, 1]))\n >>> sg.edata[dgl.EID]\n tensor([2, 0])\n\n To sample one inbound edge for node 0 and node 1 with probability in edge feature\n ``prob``:\n\n >>> sg = dgl.sampling.sample_neighbors(g, [0, 1], 1, prob='prob')\n >>> sg.edges(order='eid')\n (tensor([2, 1]), tensor([0, 1]))\n\n With ``fanout`` greater than the number of actual neighbors and without replacement,\n DGL will take all neighbors instead:\n\n >>> sg = dgl.sampling.sample_neighbors(g, [0, 1], 3)\n >>> sg.edges(order='eid')\n (tensor([1, 2, 0, 1]), tensor([0, 0, 1, 1]))\n \"\"\"\n if not isinstance(nodes, dict):\n if len(g.ntypes) > 1:\n raise DGLError(\"Must specify node type when the graph is not homogeneous.\")\n nodes = {g.ntypes[0] : nodes}\n nodes = utils.prepare_tensor_dict(g, nodes, 'nodes')\n nodes_all_types = []\n for ntype in g.ntypes:\n if ntype in nodes:\n nodes_all_types.append(F.to_dgl_nd(nodes[ntype]))\n else:\n nodes_all_types.append(nd.array([], ctx=nd.cpu()))\n\n if not isinstance(fanout, dict):\n fanout_array = [int(fanout)] * len(g.etypes)\n else:\n if len(fanout) != len(g.etypes):\n raise DGLError('Fan-out must be specified for each edge type '\n 'if a dict is provided.')\n fanout_array = [None] * len(g.etypes)\n for etype, value in fanout.items():\n fanout_array[g.get_etype_id(etype)] = value\n fanout_array = F.to_dgl_nd(F.tensor(fanout_array, dtype=F.int64))\n\n if prob is None:\n prob_arrays = [nd.array([], ctx=nd.cpu())] * len(g.etypes)\n else:\n prob_arrays = []\n for etype in g.canonical_etypes:\n if prob in g.edges[etype].data:\n prob_arrays.append(F.to_dgl_nd(g.edges[etype].data[prob]))\n else:\n prob_arrays.append(nd.array([], ctx=nd.cpu()))\n\n subgidx = _CAPI_DGLSampleNeighbors(g._graph, nodes_all_types, fanout_array,\n edge_dir, prob_arrays, replace)\n induced_edges = subgidx.induced_edges\n ret = DGLHeteroGraph(subgidx.graph, g.ntypes, g.etypes)\n for i, etype in enumerate(ret.canonical_etypes):\n ret.edges[etype].data[EID] = induced_edges[i]\n return ret\n\ndef select_topk(g, k, weight, nodes=None, edge_dir='in', ascending=False):\n \"\"\"Select the neighboring edges with k-largest (or k-smallest) weights of the given\n nodes and return the induced subgraph.\n\n For each node, a number of inbound (or outbound when ``edge_dir == 'out'``) edges\n with the largest (or smallest when ``ascending == True``) weights will be chosen.\n The graph returned will then contain all the nodes in the original graph, but only\n the sampled edges.\n\n Node/edge features are not preserved. 
The original IDs of\n the sampled edges are stored as the `dgl.EID` feature in the returned graph.\n\n Parameters\n ----------\n g : DGLGraph\n The graph\n k : int or dict[etype, int]\n The number of edges to be selected for each node on each edge type.\n\n This argument can take a single int or a dictionary of edge types and ints.\n If a single int is given, DGL will select this number of edges for each node for\n every edge type.\n\n If -1 is given for a single edge type, all the neighboring edges with that edge\n type will be selected.\n weight : str\n Feature name of the weights associated with each edge. The feature should have only\n one element for each edge. The feature can be either int32/64 or float32/64.\n nodes : tensor or dict, optional\n Node IDs to sample neighbors from.\n\n This argument can take a single ID tensor or a dictionary of node types and ID tensors.\n If a single tensor is given, the graph must only have one type of nodes.\n\n If None, DGL will select the edges for all nodes.\n edge_dir : str, optional\n Determines whether to sample inbound or outbound edges.\n\n Can take either ``in`` for inbound edges or ``out`` for outbound edges.\n ascending : bool, optional\n If True, DGL will return edges with k-smallest weights instead of\n k-largest weights.\n\n Returns\n -------\n DGLGraph\n A sampled subgraph containing only the sampled neighboring edges.\n\n Examples\n --------\n >>> g = dgl.graph(([0, 0, 1, 1, 2, 2], [1, 2, 0, 1, 2, 0]))\n >>> g.edata['weight'] = torch.FloatTensor([0, 1, 0, 1, 0, 1])\n >>> sg = dgl.sampling.select_topk(g, 1, 'weight')\n >>> sg.edges(order='eid')\n (tensor([2, 1, 0]), tensor([0, 1, 2]))\n \"\"\"\n # Rectify nodes to a dictionary\n if nodes is None:\n nodes = {ntype: F.arange(0, g.number_of_nodes(ntype)) for ntype in g.ntypes}\n elif not isinstance(nodes, dict):\n if len(g.ntypes) > 1:\n raise DGLError(\"Must specify node type when the graph is not homogeneous.\")\n nodes = {g.ntypes[0] : nodes}\n\n # Parse nodes into a list of NDArrays.\n nodes = utils.prepare_tensor_dict(g, nodes, 'nodes')\n nodes_all_types = []\n for ntype in g.ntypes:\n if ntype in nodes:\n nodes_all_types.append(F.to_dgl_nd(nodes[ntype]))\n else:\n nodes_all_types.append(nd.array([], ctx=nd.cpu()))\n\n if not isinstance(k, dict):\n k_array = [int(k)] * len(g.etypes)\n else:\n if len(k) != len(g.etypes):\n raise DGLError('K value must be specified for each edge type '\n 'if a dict is provided.')\n k_array = [None] * len(g.etypes)\n for etype, value in k.items():\n k_array[g.get_etype_id(etype)] = value\n k_array = F.to_dgl_nd(F.tensor(k_array, dtype=F.int64))\n\n weight_arrays = []\n for etype in g.canonical_etypes:\n if weight in g.edges[etype].data:\n weight_arrays.append(F.to_dgl_nd(g.edges[etype].data[weight]))\n else:\n raise DGLError('Edge weights \"{}\" do not exist for relation graph \"{}\".'.format(\n weight, etype))\n\n subgidx = _CAPI_DGLSampleNeighborsTopk(\n g._graph, nodes_all_types, k_array, edge_dir, weight_arrays, bool(ascending))\n induced_edges = subgidx.induced_edges\n ret = DGLHeteroGraph(subgidx.graph, g.ntypes, g.etypes)\n for i, etype in enumerate(ret.canonical_etypes):\n ret.edges[etype].data[EID] = induced_edges[i]\n return ret\n\n\nclass MultiLayerNeighborSampler(BlockSampler):\n \"\"\"Sampler that builds computational dependency of node representations via\n neighbor sampling for multilayer GNN.\n\n This sampler will make every node gather messages from a fixed number of neighbors\n per edge type. 
The neighbors are picked uniformly.\n\n Parameters\n ----------\n fanouts : list[int] or list[dict[etype, int] or None]\n List of neighbors to sample per edge type for each GNN layer, starting from the\n first layer.\n\n If the graph is homogeneous, only an integer is needed for each layer.\n\n If None is provided for one layer, all neighbors will be included regardless of\n edge types.\n\n If -1 is provided for one edge type on one layer, then all inbound edges\n of that edge type will be included.\n replace : bool, default True\n Whether to sample with replacement\n return_eids : bool, default False\n Whether to return edge IDs of the original graph in the sampled blocks.\n\n If True, the edge IDs will be stored as ``dgl.EID`` feature for each edge type.\n\n Examples\n --------\n To train a 3-layer GNN for node classification on a set of nodes ``train_nid`` on\n a homogeneous graph where each node takes messages from all neighbors (assume\n the backend is PyTorch):\n >>> sampler = dgl.sampling.NeighborSampler([None, None, None])\n >>> collator = dgl.sampling.NodeCollator(g, train_nid, sampler)\n >>> dataloader = torch.utils.data.DataLoader(\n ... collator.dataset, collate_fn=collator.collate,\n ... batch_size=1024, shuffle=True, drop_last=False, num_workers=4)\n >>> for blocks in dataloader:\n ... train_on(blocks)\n\n If we wish to gather from 5 neighbors on the first layer, 10 neighbors on the second,\n and 15 layers on the third:\n >>> sampler = dgl.sampling.NeighborSampler([5, 10, 15])\n\n If training on a heterogeneous graph and you want different number of neighbors for each\n edge type, one should instead provide a list of dicts. Each dict would specify the\n number of neighbors to pick per edge type.\n >>> sampler = dgl.sampling.NeighborSampler([\n ... {('user', 'follows', 'user'): 5,\n ... ('user', 'plays', 'game'): 4,\n ... 
('game', 'played-by', 'user'): 3}] * 3)\n \"\"\"\n def __init__(self, fanouts, replace=False, return_eids=False):\n super().__init__(len(fanouts))\n\n self.fanouts = fanouts\n self.replace = replace\n self.return_eids = return_eids\n if return_eids:\n self.set_block_postprocessor(assign_block_eids)\n\n def sample_frontier(self, block_id, g, seed_nodes, *args, **kwargs):\n fanout = self.fanouts[block_id]\n if fanout is None:\n frontier = subg.in_subgraph(g, seed_nodes)\n else:\n frontier = sample_neighbors(g, seed_nodes, fanout, replace=self.replace)\n return frontier\n\n_init_api('dgl.sampling.neighbor', __name__)\n","sub_path":"python/dgl/sampling/neighbor.py","file_name":"neighbor.py","file_ext":"py","file_size_in_byte":12243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"2127232","text":"# Function to build a url for http request\n# Expects args to be in the format of header=value (str)\n\ndef build_url(url, *argv):\n if len(argv) == 0:\n return url\n url += '?'\n \n # start building http request\n for arg in argv:\n url += arg\n url += '&'\n url = url[:-1]\n \n return url\n\n\ndef build_twitch_streams_url(url, first, game_id, after):\n return_url = ''\n if game_id != '0':\n if after != '0':\n return_url = build_url(url, \"first=\" + first, \"game_id=\" + game_id, \"after=\" + after)\n else:\n return_url = build_url(url, \"first=\" + first, \"game_id=\" + game_id)\n else:\n if after != '0':\n return_url = build_url(url, \"first=\" + first, \"after=\" + after)\n else:\n return_url = build_url(url, \"first=\" + first)\n\n return return_url","sub_path":"utils/url_builder.py","file_name":"url_builder.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"48021406","text":"import random\nimport sys\nimport math\n\nimport csv\nfrom networkx.generators.random_graphs import erdos_renyi_graph\n\nUSER_NUM = 150\nPOST_NUM = 50\nRANGE = \"ALL\"\n\nusers = USER_NUM*[0]\nposts = POST_NUM*[0]\n\ngraph = {}\n\nfor i in range(USER_NUM):users[i]=round(random.uniform(-1,1),2)\n\n# for i in range(USER_NUM//2):users[i]=round(random.uniform(0,0.5),2)\n# for i in range(USER_NUM//2,USER_NUM):users[i]=round(random.uniform(-0.5,0),2)\n\nfor i in range(POST_NUM):posts[i]=round(random.uniform(-1,1),2)\n# for i in range(POST_NUM//2):posts[i]=round(random.uniform(0.7,1),2)\n# for i in range(POST_NUM//2,POST_NUM):posts[i]=round(random.uniform(-1,-0.7),2)\n\n# users.sort()\n# posts.sort()\ng = erdos_renyi_graph(USER_NUM,0.5)\nfor x,y in g.edges:\n\tif x not in graph:graph[x] = []\n\tif y not in graph:graph[y] = []\n\tgraph[x].append(y)\n\tgraph[y].append(x)\n\ndatafile = \"data_\" + str(USER_NUM) + \"_\" + str(POST_NUM) + \"_\" + RANGE + \".csv\"\n\nwith open(datafile, 'w') as csvfile:\n\tcsvwriter = csv.writer(csvfile)\n\n\tcsvwriter.writerow(users)\n\tcsvwriter.writerow(posts)\n\n\tfor i in graph:\n\t\ttemp = graph[i].copy()\n\t\ttemp.insert(0, i)\n\t\tcsvwriter.writerow(temp)\t\t\n\n\n","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"192999579","text":"import datetime\nimport json\nimport queue\nimport secrets\nimport threading\nfrom collections import deque\nfrom timeit import default_timer as timer\nfrom urllib.parse import urlparse\n\nfrom bs4 import BeautifulSoup\nfrom requests import Request, Session\n\nfrom 
scrappers.apps.config.http import user_agent\n# from scrappers.apps.config.http.utilities import UtilitiesMixin\nfrom scrappers.apps.config.messages import Error, Info\n\n\nclass RequestsManager:\n \"\"\"Base manager used to send requests to the internet\n \"\"\"\n session = Session()\n request_errors = []\n\n @classmethod\n def get(cls, urls:list, **kwargs):\n threads = []\n responses = []\n\n with cls.session as new_session:\n def new_session_wrapper(request):\n response = new_session.send(request)\n if response.status_code == 200:\n # TODO: Apparently the deque()\n # mutates. Maybe we need to create\n # an instance in the method\n responses.append(response)\n else:\n cls.update(cls, url)\n\n for index, url in enumerate(urls):\n if isinstance(url, (tuple, list)):\n url = url[0]\n if not url.startswith('http'):\n raise ValueError(f'Url should start with http or https. In the case where you are using a tuple, the url should be at position 0. Received: {url[0]}')\n thread = threading.Thread(\n target=new_session_wrapper,\n args=[cls.prepare(cls, url, **kwargs)]\n )\n threads.append(thread)\n\n if 'limit_to' in kwargs:\n limit_to = kwargs['limit_to']\n has_reached_limit = all([index == limit_to, limit_to > 0])\n if has_reached_limit:\n break\n\n if threads:\n for thread in threads:\n thread.start()\n if thread.is_alive():\n print('GET HTTP/1.1', '--')\n thread.join()\n if len(urls) == 1:\n return responses[0]\n return responses\n\n def prepare(self, url, **headers):\n \"\"\"This method prepares a request to send to the web.\n It is structured so that the prepared request\n can be modified until the last moment.\n \"\"\"\n base_headers = {\n 'User-Agent': user_agent.get_rand_agent()\n }\n if headers:\n base_headers = {**base_headers, **headers}\n\n request = Request(method='GET', url=url, headers=base_headers)\n prepared_request = self.session.prepare_request(request)\n return prepared_request\n\n def update(self, url, **kwargs):\n \"\"\"Updates the request error stack\n \"\"\"\n return self.request_errors.append((url, Error('The request with \"%s\" was not successful' % url)))\n\n def beautify(self, urls:list, **kwargs):\n \"\"\"Returns BeautifulSoup objects\n \"\"\"\n soups = []\n responses = self.get(urls, **kwargs)\n for response in responses:\n soups.append(BeautifulSoup(response.text, 'html.parser'))\n return soups\n\n def beautify_single(self, url):\n \"\"\"Returns a BeautifulSoup object\n \"\"\"\n response = self.get([url])\n return BeautifulSoup(response.text, 'html.parser')\n","sub_path":"apps/config/http/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"241628078","text":"import pandas as pd\n\n# TARGET_RATES = [0.05, 0.06, 0.07, 0.08]\n# DATES_RANGE = [(30, 90), (60, 180), (90, 270)]\nTARGET_RATES = [0.06]\nDATES_RANGE = [(30, 360)]\nDATA_DIR = 'D:/workspace/zjsxzy_in_js/strategy/目标止盈策略'\n\ndef main():\n for target in TARGET_RATES:\n\n output_df = pd.DataFrame(columns=['最短运作期', \n '最长运作期', \n '止盈概率', \n '负收益率占比', \n '达成目标平均收益率', \n '未达成目标平均收益率', \n '最大回撤',\n '第二天年化收益率小于目标的概率'], index=range(len(DATES_RANGE)))\n\n for i, (least_days, most_days) in enumerate(DATES_RANGE):\n filename = '%s/data/止盈内部基金组合统计_T=%.2f_LD=%d_MD=%d.xlsx'%(DATA_DIR, target, least_days, most_days)\n df = pd.read_excel(filename)\n df = df.dropna()\n # max drawdown adjusted for the advisory fee and subscription/redemption fee\n rate = 0.005 + 0.001 \n df['最大回撤'] = df['最大回撤'] - rate\n # redeem on the day the target is reached; the return is received the next day\n df['第二天年化收益率'] =
df['年化收益率'] * (df['累计天数'] / (df['累计天数'] + 1))\n\n output_df.loc[i]['最短运作期'] = least_days\n output_df.loc[i]['最长运作期'] = most_days\n\n total_size = df.shape[0]\n get_target_size = df[df['年化收益率'] >= target].shape[0]\n neg_size = df[df['年化收益率'] < 0].shape[0]\n next_day_size = df[(df['年化收益率'] >= target) & (df['第二天年化收益率'] < target)].shape[0]\n\n output_df.loc[i]['止盈概率'] = get_target_size * 1.0 / total_size\n output_df.loc[i]['负收益率占比'] = neg_size * 1.0 / total_size\n output_df.loc[i]['达成目标平均收益率'] = df[df['年化收益率'] >= target]['年化收益率'].mean()\n output_df.loc[i]['未达成目标平均收益率'] = df[df['年化收益率'] < target]['年化收益率'].mean()\n output_df.loc[i]['最大回撤'] = df['最大回撤'].max()\n output_df.loc[i]['第二天年化收益率小于目标的概率'] = next_day_size * 1.0 / get_target_size\n \n output_df.to_excel('%s/data/止盈内部基金组合统计-目标收益率=%.2f.xlsx'%(DATA_DIR, target))\n\nif __name__ == \"__main__\":\n main()","sub_path":"strategy/目标止盈策略/src/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"572583483","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 6 15:07:09 2020\n\n@author: s-long.bao\n\"\"\"\n\n# import library\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport xgboost as xgb\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import TimeSeriesSplit\nfrom sklearn import metrics\nimport pdb\n\n# parameters\nthreshold = 0.9\nstocks = [\n 88480,36680,65720,67400,94240,\n 45630,21600,39900,37580,14350,\n 45650,45920,45880,36720,44250,\n 36920,75640,43820,39620,39060\n]\n\ndrop_cols = ['ld_cp_lending_nominal_spot',\n 'ld_own_stock_nominal_system',\n 'ld_lending_nominal_system',\n 'ld_lending_nominal_general',\n 'ld_own_stock_nominal_general',\n 'ld_cp_lending_nominal',\n 'ld_own_stock_nominal',\n 'ld_own_stock_nominal_spot',\n 'ld_lending_ratio',\n ]\n\n# import data\ndata_dir = r'C:\\Data_Science_Projects\\8.Stock_Lending\\data'\ndf_raw = pd.read_csv(os.path.join(data_dir,'data_source.zip'),\n index_col='mg_date', parse_dates=True)\n\ndef preprocess(stock, pct_change=True):\n df = df_raw[df_raw.mg_code == stock]\n df['target'] = (df.ld_lending_ratio >= threshold).astype(float)\n df.dropna(inplace=True)\n df.drop(columns='mg_code',inplace=True)\n \n if pct_change:\n # feature engineering\n df_pct = df.pct_change()\n df_pct.drop(columns='target',inplace=True)\n df_pct = df_pct.iloc[1:].copy()\n df_pct.dropna(inplace=True, axis=1)\n df_pct.columns = [column + '_pct' for column in df_pct.columns]\n \n # merge dataframe\n df = pd.merge(df,df_pct,on='mg_date',how='left')\n return df\n\ndef baseline_model(df, drop_cols=[]):\n # baseline xgb\n # drop_cols = ['ld_cp_lending_nominal_spot',\n # 'ld_own_stock_nominal_system',\n # 'ld_lending_nominal_system',\n # 'ld_lending_nominal_general',\n # 'ld_own_stock_nominal_general',\n # 'ld_cp_lending_nominal',\n # 'ld_own_stock_nominal',\n # 'ld_own_stock_nominal_spot',\n # ]\n x = df.drop(columns=['target']+drop_cols).values[1:-1,:]\n y = df['target'].values[2:]\n cols = df.columns.drop(['target']+drop_cols)\n \n auc =[]\n feat_importance = []\n tscv = TimeSeriesSplit(n_splits=5)\n for train_index, test_index in tscv.split(x):\n \n #(X_train, X_test,y_train, y_test) = train_test_split(x, y, \n # test_size=0.3, random_state=1)\n \n X_train, X_test = x[train_index], x[test_index]\n y_train, y_test = y[train_index], y[test_index]\n \n # model implementation\n model = 
xgb.XGBClassifier()\n model.fit(X_train, y_train)\n y_score = model.predict_proba(X_test)[:,1]\n fpr, tpr, thresholds = metrics.roc_curve(y_test, y_score)\n auc = metrics.auc(fpr, tpr)\n feat_importance = pd.DataFrame(model.feature_importances_,index=cols,\n columns=['feature_importance'])\n return model, auc, fpr, tpr, feat_importance\n\n\ndef plot_roc(stock, fpr, tpr, auc):\n # plot ROC curve\n # ax = plt.figure(0)\n plt.plot(fpr, tpr, label=f'{stock} ROC curve (area = %.2f)'%auc)\n plt.legend()\n plt.title('ROC curve')\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.grid(True)\n \n \ndef plot_lending_ratio(stock):\n df_raw[df_raw.mg_code==stock].ld_lending_ratio.plot()\n \ndef run_all(drop_cols=[]):\n feat_importances = pd.DataFrame()\n for stock in stocks:\n df = preprocess(stock)\n model, auc, fpr, tpr, feat_importance = baseline_model(df, drop_cols)\n plot_roc(stock, fpr, tpr, auc)\n feat_importance.columns = [stock]\n feat_importances = pd.concat([feat_importances, feat_importance], axis=1)\n return feat_importances\n\ndef run_single(stock, drop_cols=[]):\n df = preprocess(stock)\n model, auc, fpr, tpr, feat_importance = baseline_model(df, drop_cols)\n # feat_importance = pd.concat(feat_importance,axis=1).mean(axis=1)\n plot_roc(stock, fpr, tpr, auc)\n feat_importance.columns = [stock]\n return feat_importance\n\nfeat_importances = run_all()\nfeat_importances.fillna(0, inplace=True)\nfeat_importances.T.to_csv(os.path.join(data_dir,'feat_importances.csv'))\n\n# plot feature importance\nax = plt.figure(1)\nfeat_importances = feat_importances.loc[feat_importances.mean(axis=1).sort_values().index.values]\nplt.boxplot(feat_importances, labels=feat_importances.index,vert=False)\n\n# plot lending ratio\nfor stock in stocks:\n plot_lending_ratio(stock)","sub_path":"DataScienceProject/24.Stock Lending/src/baseline_model.py","file_name":"baseline_model.py","file_ext":"py","file_size_in_byte":4803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"391190794","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom two_process_nlp.utils import init_embedder\nfrom two_process_nlp.utils import w2e_to_sims\nfrom two_process_nlp.params import to_embedder_name\n\nfrom analyze.utils import gen_param2vals_for_completed_jobs\n\n\nEMBEDDER_NAMES = ['ww', 'wd', 'sg', 'cbow', 'srn', 'lstm', 'random_normal', 'random_uniform']\n\n\nembedder_name2plot_data = {embedder_name: [] for embedder_name in EMBEDDER_NAMES}\njob_name2plot_data = {}\nfor param2val in gen_param2vals_for_completed_jobs():\n embedder_name = to_embedder_name(param2val)\n job_name = param2val['job_name']\n print('\\n==================================\\nUsing param2val for {}'.format(embedder_name))\n embedder = init_embedder(param2val)\n embedder.load_w2e()\n #\n vocab_sims_mat = w2e_to_sims(embedder.w2e, embedder.vocab, embedder.vocab)\n embedder_name2plot_data[embedder_name].append((vocab_sims_mat.mean(), vocab_sims_mat.std()))\n print(embedder_name)\n print(vocab_sims_mat.mean())\n print(vocab_sims_mat.std())\n\n# figure\nembedder_name2color = {embedder_name: plt.cm.get_cmap('tab10')(n)\n for n, embedder_name in enumerate(EMBEDDER_NAMES)}\nfig, ax = plt.subplots(1, figsize=(10, 5), dpi=300)\nplt.title('Cosine similarities between all pairs in vocab')\nnum_x = len(EMBEDDER_NAMES)\nx = np.arange(num_x)\nax.set_xticks(x)\nsorted_embedder_names = list(zip(*sorted(embedder_name2plot_data.items(),
key=lambda i: i[1])))[0]\nax.set_xticklabels(sorted_embedder_names)\nax.set_ylabel('Cosine Similarity')\nax.set_xlabel('Embedder')\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.tick_params(axis='both', which='both', top=False, right=False)\n# plot\nfor n, embedder_name in enumerate(sorted_embedder_names):\n color = embedder_name2color[embedder_name]\n plot_data = embedder_name2plot_data[embedder_name]\n ys = [pd[0] for pd in plot_data] # only mean\n print(embedder_name)\n print(ys)\n ax.bar(x=n,\n height=np.nanmean(ys),\n yerr=np.nanstd(ys),\n edgecolor='black',\n width=1.0)\nplt.tight_layout()\nplt.show()\n\n\n","sub_path":"plot/plot_vector_sims_by_embedder.py","file_name":"plot_vector_sims_by_embedder.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"328893742","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom rasa_core.channels.console import ConsoleInputChannel\n\n\ndef test_console_input():\n import rasa_core.channels.console\n # Overwrites the input() function and when someone else tries to read\n # something from the command line this function gets called. But instead of\n # waiting input for the user, this simulates the input of\n # \"2\", therefore it looks like the user is always typing \"2\" if someone\n # requests a cmd input.\n\n rasa_core.channels.console.input = lambda _=None: \"Test Input\"\n\n recorded = []\n\n def on_message(message):\n recorded.append(message)\n channel = ConsoleInputChannel()\n channel._record_messages(on_message, max_message_limit=3)\n assert [r.text for r in recorded] == [\"Test Input\",\n \"Test Input\",\n \"Test Input\"]\n\n\ndef test_slack_init_one_parameter():\n from rasa_core.channels.slack import SlackInput\n ch = SlackInput(\"xoxb-test\")\n assert ch.slack_token == \"xoxb-test\"\n assert ch.slack_channel is None\n\n\ndef test_slack_init_two_parameters():\n from rasa_core.channels.slack import SlackInput\n ch = SlackInput(\"xoxb-test\", \"test\")\n assert ch.slack_token == \"xoxb-test\"\n assert ch.slack_channel == \"test\"\n\n\ndef test_is_slack_message_none():\n from rasa_core.channels.slack import SlackInput\n import json\n payload = {}\n slack_message = json.loads(json.dumps(payload))\n assert SlackInput._is_user_message(slack_message) is None\n\n\ndef test_is_slack_message_true():\n from rasa_core.channels.slack import SlackInput\n import json\n event = {}\n event['type'] = 'message'\n event['channel'] = 'C2147483705'\n event['user'] = 'U2147483697'\n event['text'] = 'Hello world'\n event['ts'] = '1355517523'\n payload = json.dumps({'event': event})\n slack_message = json.loads(payload)\n assert SlackInput._is_user_message(slack_message) is True\n\n\ndef test_is_slack_message_false():\n from rasa_core.channels.slack import SlackInput\n import json\n event = {}\n event['type'] = 'message'\n event['channel'] = 'C2147483705'\n event['user'] = 'U2147483697'\n event['text'] = 'Hello world'\n event['ts'] = '1355517523'\n event['bot_id'] = '1355517523' # Results in message being false.\n payload = json.dumps({'event': event})\n slack_message = json.loads(payload)\n assert SlackInput._is_user_message(slack_message) is False\n\n\ndef test_slackbot_init_one_parameter():\n from rasa_core.channels.slack import SlackBot\n ch = SlackBot(\"DummyToken\")\n assert ch.token == \"DummyToken\"\n assert 
ch.slack_channel is None\n\n\ndef test_slackbot_init_two_parameter():\n import rasa_core.channels.slack\n bot = rasa_core.channels.slack.SlackBot(\"DummyToken\", \"General\")\n assert bot.token == \"DummyToken\"\n assert bot.slack_channel == \"General\"\n\n\n# Use monkeypatch for sending attachments, images and plain text.\ndef test_slackbot_send_attachment_only(monkeypatch):\n def mockreturn(self, method, channel, text, as_user, attachments):\n return attachments\n import rasa_core.channels.slack\n import slackclient\n import json\n monkeypatch.setattr(slackclient.SlackClient, 'api_call', mockreturn)\n bot = rasa_core.channels.slack.SlackBot(\"DummyToken\", \"General\")\n attachment = json.dumps([{\"fallback\": \"Financial Advisor Summary\",\n \"color\": \"#36a64f\", \"author_name\": \"ABE\",\n \"title\": \"Financial Advisor Summary\",\n \"title_link\": \"http://tenfactorialrocks.com\",\n \"image_url\": \"https://r.com/cancel/r12\",\n \"thumb_url\": \"https://r.com/cancel/r12\",\n \"actions\": [{\"type\": \"button\",\n \"text\": \"\\ud83d\\udcc8 Dashboard\",\n \"url\": \"https://r.com/cancel/r12\",\n \"style\": \"primary\"},\n {\"type\": \"button\",\n \"text\": \"\\ud83d\\udccb Download XL\",\n \"url\": \"https://r.com/cancel/r12\",\n \"style\": \"danger\"},\n {\"type\": \"button\",\n \"text\": \"\\ud83d\\udce7 E-Mail\",\n \"url\": \"https://r.com/cancel/r12\",\n \"style\": \"danger\"}],\n \"footer\": \"Powered by 1010rocks\",\n \"ts\": 1531889719}])\n assert bot.send_attachment(\"ID\", attachment) == attachment\n\n\ndef test_slackbot_send_attachment_withtext(monkeypatch):\n def mockreturn(self, method, channel, text, as_user, attachments):\n return attachments+text\n import rasa_core.channels.slack\n import slackclient\n import json\n monkeypatch.setattr(slackclient.SlackClient, 'api_call', mockreturn)\n bot = rasa_core.channels.slack.SlackBot(\"DummyToken\", \"General\")\n text = \"Sample text\"\n attachment = json.dumps([{\"fallback\": \"Financial Advisor Summary\",\n \"color\": \"#36a64f\", \"author_name\": \"ABE\",\n \"title\": \"Financial Advisor Summary\",\n \"title_link\": \"http://tenfactorialrocks.com\",\n \"image_url\": \"https://r.com/cancel/r12\",\n \"thumb_url\": \"https://r.com/cancel/r12\",\n \"actions\": [{\"type\": \"button\",\n \"text\": \"\\ud83d\\udcc8 Dashboard\",\n \"url\": \"https://r.com/cancel/r12\",\n \"style\": \"primary\"},\n {\"type\": \"button\",\n \"text\": \"\\ud83d\\udccb XL\",\n \"url\": \"https://r.com/cancel/r12\",\n \"style\": \"danger\"},\n {\"type\": \"button\",\n \"text\": \"\\ud83d\\udce7 E-Mail\",\n \"url\": \"https://r.com/cancel/r123\",\n \"style\": \"danger\"}],\n \"footer\": \"Powered by 1010rocks\",\n \"ts\": 1531889719}])\n assert bot.send_attachment(\"ID\", attachment, text) == attachment+text\n\n\ndef test_slackbot_send_image_url(monkeypatch):\n def mockreturn(self, method, channel, as_user, attachments):\n return json.dumps(attachments)\n import rasa_core.channels.slack\n import slackclient\n import json\n monkeypatch.setattr(slackclient.SlackClient, 'api_call', mockreturn)\n bot = rasa_core.channels.slack.SlackBot(\"DummyToken\", \"General\")\n url = json.dumps([{\"URL\": \"http://www.rasa.net\"}])\n assert bot.send_image_url(\"ID\", url) == json.dumps(\n [{'image_url': '[{\"URL\": \"http://www.rasa.net\"}]',\n 'text': ''}])\n\n\ndef test_slackbot_send_text(monkeypatch):\n def mockreturn(self, method, channel, as_user, text):\n return text\n import rasa_core.channels.slack\n import slackclient\n 
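# replace SlackClient.api_call with the mock so the test runs without any real Slack network call\n 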
monkeypatch.setattr(slackclient.SlackClient, 'api_call', mockreturn)\n bot = rasa_core.channels.slack.SlackBot(\"DummyToken\", \"General\")\n text = \"Some text\" # This text is returned back by the mock.\n assert bot.send_text_message(\"ID\", text) == text\n","sub_path":"tests/test_channels.py","file_name":"test_channels.py","file_ext":"py","file_size_in_byte":7780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"178080305","text":"import numpy as np\nimport healpy as hp\nfrom genesys import Genesys_Class\nimport os\n\nclass Sky_Map(Genesys_Class):\n \"\"\"\n This class has useful utility functions for manipulating HEALPix maps and masks\n Two member variables: sky_map, nside\n If a sky_map is masked, sky_map will contain the masked map in the healpy format\n Healpy mask convention:\n 0(false) -> pixel not seen\n 1(true) -> pixel seen\n \"\"\"\n def __init__(self, map_file_name=None, field=(0), sky_map_np=None, other=None, verbose=False):\n \"\"\"\n Order of preference:\n map_file_name -> read_map_from_file(map_file_name, field) : read healpix map from file\n sky_map_np -> from_np_array(map_np) : assign from a map existing as a numpy array\n other -> copy_attributes(other) : copy constructor\n none -> empty object\n \"\"\"\n if map_file_name is not None:\n self.read_map_from_file(map_file_name=map_file_name, field=field, verbose=verbose)\n elif sky_map_np is not None:\n self.from_np_array(sky_map_np)\n elif other is not None:\n self.copy_attributes(other=other)\n else:\n self.sky_map = None\n self.nside = None\n\n def read_map_from_file(self, map_file_name, field=(0), verbose=False):\n \"\"\"\n The base directory of all maps is assumed to be global_paths['maps_dir']\n \"\"\"\n map_file_path = self.get_path_to_map_file(map_file_name)\n self.sky_map = hp.read_map(map_file_path, field=field, dtype=np.float64, verbose=verbose)\n self.nside = hp.get_nside(self.sky_map)\n\n def from_np_array(self, sky_map_np):\n \"\"\"\n Accepts a HEALPix map in numpy array format\n \"\"\"\n self.sky_map = sky_map_np\n self.nside = hp.get_nside(self.sky_map)\n\n def write_map(self, map_file_name):\n \"\"\"\n Writes map to file\n \"\"\"\n map_file_path = self.get_path_to_map_file(map_file_name)\n hp.write_map(map_file_path, self.sky_map)\n\n #*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*\n # Path naming conventions\n #*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*\n\n def get_path_to_map_file(self, map_file_name):\n map_file_path = os.path.join(self.global_paths['maps_dir'], map_file_name)\n return map_file_path\n\n #*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*\n # Mask routines\n #*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*\n\n def mask_map(self, binary_mask):\n \"\"\"\n MASKS THE GIVEN SKY MAP WITH THE BINARY MASK.\n FOR EXAMPLE, PASSING A SINGLE BINARY MASK FOR A SET OF I,Q,U SKY MAPS, WILL APPLY THE SAME MASK TO ALL THE THREE.\n IN GENERAL, THE MASKS ARE ASSIGNED IN A CYCLIC MANNER. 
FOR EXAMPLE IF SKY_MAP HAS 4 MAPS AND BINARY_MASK HAS 2 MASKS, THEN:\n binary_mask[0] -> sky_map[0]\n binary_mask[1] -> sky_map[1]\n binary_mask[0] -> sky_map[2]\n binary_mask[1] -> sky_map[3]\n \"\"\"\n # CHECKING THAT THE SKY MAP AND MASK HAVE THE SAME RESOLUTION\n assert self.nside == binary_mask.nside, \"nside of sky map and mask does not match.\\nnside of sky map : {}\\nnside of mask : {}\".format(self.nside, binary_mask.nside)\n\n self.sky_map = hp.ma(self.sky_map) \n self.sky_map.mask = np.logical_not(binary_mask.sky_map)\n\n def get_mask_from_nan(self, labels=None):\n \"\"\"\n Make a mask of the same dimension as the input sky mask.\n A NAN corresponds to an unseen pixel and will be set to 0 in the mask\n \"\"\"\n if labels == None:\n labels = list(self.label_dict.keys())\n binary_mask = np.logical_not(self.sky_map == np.nan)\n boolean_selector = self.subset_label_selector(labels)\n return Sky_Map(sky_map_np=binary_mask[boolean_selector, :], map_labels=labels)\n#\n # def get_mask_from_cutoff(self, maximum=None, minimum=None, labels):\n # if maximum == None:\n # maximum = self.sky_map.max()\n # if minimum == None:\n # minimum = self.sky_map.min()\n # binary_mask = np.ones(sky_mask.shape, dtype=np.int)\n # binary_mask[sky_mask > maximum] = 0\n # binary_mask[sky_mask < minimum] = 0\n # return binary_mask\n#\n # def get_sky_fraction(binary_mask):\n # \"\"\"\n # Returns the fraction of the sky that is valid.\n # The function determines the dimension of the binary mask.\n # It returns a scalar for a single mask. It returns an array of the same number of masks otherwise.\n # \"\"\"\n # dim_mask = hp.maptype(binary_mask)\n # n_pix = 12 * hp.get_nside(binary_mask)**2\n # if dim_mask == 0:\n # sky_fraction = np.sum(binary_mask.astype(np.float)) / n_pix\n # else:\n # sky_fraction = np.sum(binary_mask.astype(np.float), axis=1) / n_pix\n # return sky_fraction\n#\n # def ud_grade_mask(mask, nside_out):\n # \"\"\"\n # ud_grades the nside of the binary mask.\n # Due to degrading, if a pixel has a value < 1, that is set to 0.\n # \"\"\"\n # nside_in = hp.get_nside(mask)\n # mask_new = hp.ud_grade(mask, nside_out=nside_out)\n # if nside_out < nside_in:\n # mask_new[mask_new < 1] = 0.0\n # mask_new.dtype = np.int\n#\n # return mask_new\n\n #*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*\n # Utilities\n #*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*\n\n def subset_label_selector(self, labels):\n boolean_selector = np.full(len(self.label_dict), False)\n for label in list(self.label_dict.keys()):\n if label in labels:\n boolean_selector[self.label_dict[label]] = True \n return boolean_selector\n\n #*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*\n#\n# def apodise_mask_gaussian(mask, fwhm, deg=True):\n # # FWHM in degrees if deg==True, in arcmins otherwise\n # if deg:\n # mask_apodised = hp.smoothing(mask, fwhm=np.radians(fwhm))\n # else:\n # mask_apodised = hp.smoothing(mask, fwhm=np.radians(fwhm/60.0))\n # return mask_apodised\n#\n# def display_mask_statistics(mask):\n # mask_dtype = mask.dtype\n # mask_dim = hp.maptype(mask)\n # nside = hp.get_nside(mask)\n # sky_frac = get_sky_fraction(mask)\n#\n # print(\"#*#*#*\")\n # print(\"d-type\\tdim\\tnside\\tsky-fraction\")\n # print(\"{}\\t{}\\t{}\\t{}\".format(mask_dtype, mask_dim, nside, sky_frac))\n # print(\"#*#*#*\\n\")\n#\n# #*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*\n#\n# def fill_empty_pixels(sky_map, max_iter, fail_fill_value=0, pol=True, verbose=False):\n # \"\"\"\n # Fill pixels with NAN 
with a fail_fill_value.\n # \"\"\"\n # if np.sum(np.isnan(sky_map)) == 0:\n # if verbose:\n # prompt(\"There are no empty pixels\")\n # return\n#\n # nside = hp.get_nside(sky_map)\n#\n # if pol:\n # dim = sky_map.shape[0]\n # for i in xrange(max_iter):\n # empty_pix = np.where(np.isnan(sky_map[0]))[0]\n # theta, phi = hp.pix2ang(nside, empty_pix)\n # neighbours = hp.get_all_neighbours(nside, theta, phi).T\n # for j in range(dim):\n # fill_values = np.nanmean(sky_map[j][neighbours], axis=-1)\n # sky_map[j][empty_pix] = fill_values\n # if np.sum(np.isnan(sky_map)) == 0:\n # break\n # else:\n # for i in xrange(max_iter):\n # empty_pix = np.where(np.isnan(sky_map))[0]\n # theta, phi = hp.pix2ang(nside, empty_pix)\n # neighbours = hp.get_all_neighbours(nside, theta, phi).T\n # fill_values = np.nanmean(sky_map[neighbours], axis=-1)\n # sky_map[empty_pix] = fill_values\n # if np.sum(np.isnan(sky_map)) == 0:\n # break\n#\n # num_empty_pix = np.sum(np.isnan(sky_map))\n # if num_empty_pix:\n # prompt(\"{} empty pixels remaining after {} iterations. Filling empty pixels with {}\\n\".format(num_empty_pix, max_iter, fail_fill_value))\n # sky_map[np.isnan(sky_map)] = fail_fill_value\n#\n# def deconvolve_map(map_in, fwhm_in=0.0, fwhm_out=0.0, lmax=None, binary_mask=None, pol=False, wiener=True, sky_prior=None):\n #\n # if fwhm_in == fwhm_out:\n # return map_in\n#\n # if lmax is None:\n # lmax = 3*hp.get_nside(map_in) - 1\n#\n # if binary_mask is None:\n # binary_mask = get_mask_from_map(map_in)\n#\n # f_sky = get_sky_fraction(binary_mask, pol)\n#\n # alm_in = su.estimate_alm(map_in, lmax, binary_mask, pol)\n # alm_dec = su.deconvolve_alm(alm_in, fwhm_in=fwhm_in, fwhm_out=fwhm_out, f_sky=f_sky, pol=pol, wiener=True, sky_prior=sky_prior)\n # map_dec = hp.alm2map(alm_dec, nside=hp.get_nside(map_in), pol=pol)\n #\n # return map_dec\n","sub_path":"maps/sky_map.py","file_name":"sky_map.py","file_ext":"py","file_size_in_byte":8901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"613192494","text":"# -*- coding: utf-8 -*-\n\"\"\"\nÉditeur de Spyder\n\nCeci est un script temporaire.\n\"\"\"\n\"\"\"\nJour de la semaine\n\naffiche le jours de la semaine correspondant a la date choisit par rapport a la date du 1er janvier 1973 supposer un lundi\n\n\"\"\"\n\n#question 1\n#retourne vrai si la date est bissextile\ndef watch_if_bissextile (annee) : \n bissextilité1 = annee % 4\n \n annee_bisextile=False\n if (bissextilité1 == 0) :\n annee_bisextile=True\n \n return annee_bisextile\n\n\n\n#question 2 3 4 \n\n#saisir la date a déterminer\nannee = int (input (\"veuillez entrer l'année\\n\"))\nmois = int (input (\"veuillez entrer le mois\\n\"))\njour = int (input (\"veuillez entrer le jour\\n\"))\n\n#liste des nombre de jours par mois \nliste_nb_jour_mois = [31,0,31,30,31,30, 31,31,30,31,30,31]\nif watch_if_bissextile(annee) == False :\n liste_nb_jour_mois [1] = 28\nelse :\n liste_nb_jour_mois [1] = 29\n\n\n#calcule le nombre de jours du début de l'annee jusqu'au mois et jour préciser\ndef mois_jours_to_jours(_jours,_mois):\n nb_jour_mois = 0 \n for numero_mois in range (0,_mois-1) :\n nb_jour_mois = liste_nb_jour_mois [numero_mois] + nb_jour_mois\n \n nb_jours_total = _jours+nb_jour_mois\n return nb_jours_total\n\"\"\"\nretourne le nombre de jours entre deux années du début de la premiere jusqu'au premier janvier de la derniere date exclus\n\"\"\"\ndef nb_jours_btw_2a(_annee1, _annee2):\n nb_jours_total = 0\n nb_jours_annee_bissextile = 366\n 
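# a common (non-leap) year has 365 days\n 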
nb_jours_annee_non_bissextile = 365\n for year in range(_annee2, _annee1):\n if watch_if_bissextile(year):\n nb_jours_total = nb_jours_total + nb_jours_annee_bissextile\n else:\n nb_jours_total = nb_jours_total + nb_jours_annee_non_bissextile\n return nb_jours_total\n \n#constante date de référence\nannee2 = 1973\n\n\"\"\"\nnb_jours_entre_deux_jours = \n nb_jours_btw_2a(annee, annee2) calcul le nombre de jours du 1er janvier 1973 jusqu'a l'annee indiquer\n +\n mois_jours_to_jours(jour, mois) calcul le nombre de jours apres la derniere annee\n\n\"\"\"\nnb_jours_entre_deux_jours = nb_jours_btw_2a(annee, annee2)+mois_jours_to_jours(jour, mois)\n\nliste_jour = [\"lundi\",\"mardi\",\"mercredi\",\"jeudi\",\"vendredi\",\"samedi\",\"dimanche\"]\nprint(liste_jour[(nb_jours_entre_deux_jours - 1) % 7])\n","sub_path":"Python Project/TD/Archive/Anthony/jour de la semain.py","file_name":"jour de la semain.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"64054750","text":"#!/usr/local/bin/python3\n# -*- coding: UTF-8 -*-\n# 融托金融多个target打包-首界面\n\n\nimport tkinter as tk\nfrom tkinter import *\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter import messagebox\nimport RTJRAutoPackage\n\n\n# 工程路径\ndef select_project_file():\n filename = askopenfilename(filetypes=[('PROJECT', '*.xcodeproj .xcworkspace'), ('All Files', '*')])\n project_file.set(filename)\n\n\n# 判断是否选择了所有的路径\ndef judge_path(project_file_path):\n\n if project_file_path != '':\n print(\"configuration:\", configuration.get())\n print(\"method:\", method.get())\n print(\"scheme:\", scheme_global.get())\n\n if menu_bind() == True:\n RTJRAutoPackage.package_rtjr(project_file_path, scheme_global.get(), configuration.get(), method.get())\n # RTAutoPackage.archive_project(project_file_path, configuration.get(), password_input.get(), method.get())\n else:\n messagebox.showinfo(\"温馨提示\", \"请选择工程路径!\")\n\n\n# OptionMenu按钮的点击\ndef menu_bind():\n if method.get() == \"enterprise\" and scheme_global.get() == \"rongtuojinrong\":\n messagebox.showinfo(\"温馨提示\", \"请正确选择\\n enterprise对应的是企业版的scheme: rongtuojinrongQY,\\n\"\n \"ad-hoc/app-store对应的scheme: rongtuojinrong\")\n return False\n elif scheme_global.get() == \"rongtuojinrongQY\" and method.get() != \"enterprise\":\n messagebox.showinfo(\"温馨提示\", \"请正确选择\\n enterprise对应的是企业版的scheme: rongtuojinrongQY,\\n\"\n \"ad-hoc/app-store对应的scheme: rongtuojinrong\")\n return False\n else:\n return True\n\n print(\"menu_bind ==== method:\", method.get())\n print(\"menu_bind ==== scheme:\", scheme_global.get())\n\n\n# 创建窗口\ndef create_window(application_name):\n root = tk.Tk()\n root.title(application_name) # 父容器标题\n\n # 创建路径\n global project_file # 文件路径\n # global output_path # ipa包输出路径\n global password_input # 开机密码\n\n project_file = StringVar()\n password_input = StringVar()\n # output_path = StringVar()\n\n # 工程路径\n tk.Label(root, text=\"工程路径:\").grid(row=0, column=0)\n tk.Entry(root, textvariable=project_file).grid(row=0, column=1)\n tk.Button(root, text=\"路径选择\", command=select_project_file).grid(row=0, column=2, padx=10, pady=5)\n\n # ipa包输出路径\n tk.Label(root, text=\"输出路径:\").grid(row=1, column=0)\n tk.Label(root, text=\"ipa包输出路径, 默认路径是-Desktop/RTJRipa\").grid(row=1, column=1)\n\n tk.Label(root, text=\"参数设置:\").grid(row=2, column=0)\n\n # 让用户选择 method - 打包的类型\n global method # 打包方法:ad-hoc、enterprise、app-store\n method = StringVar()\n method.set(\"ad-hoc\")\n option_method = OptionMenu(root, method, 
'ad-hoc', 'enterprise', 'app-store').grid(row=2, column=1, pady=10)\n # option_method.bind('', menu_bind())\n\n # 全局的scheme\n global scheme_global\n scheme_global = StringVar()\n scheme_global.set(\"rongtuojinrong\")\n # 让用户选择 scheme - 打包的target\n option_scheme = OptionMenu(root, scheme_global, 'rongtuojinrong', 'rongtuojinrongQY').grid(row=2, column=2, pady=10)\n # option_scheme.bind('', menu_bind())\n\n # 让用户选择 CONFIGURATION - 打包的模式\n global configuration # 让用户选择 CONFIGURATION: Debug、Release\n # 打包的模式:Debug、Release\n configuration = StringVar()\n configuration.set('Debug') # 设置默认选中Debug选项\n option_configuration = OptionMenu(root, configuration, 'Debug', 'Release')\n option_configuration.grid(row=2, column=3, pady=10)\n print(\"option_configuration:\", option_configuration)\n # option_configuration.bind('', menu_bind())\n\n # 打包\n tk.Button(root, text=\"开始打包\", command=lambda: judge_path(project_file.get())).grid(row=3, column=1, pady=10)\n\n # option_method.pack()\n # option_method.bind('', menu_bind())\n # option_configuration.bind('', menu_bind())\n # option_configuration.pack()\n # option_scheme.pack()\n # option_scheme.bind('', menu_bind())\n\n root.mainloop()\n\n\nif __name__ == '__main__':\n create_window(\"融托金融\")\n","sub_path":"最新Python打包成可执行文件/RTJRGUI.py","file_name":"RTJRGUI.py","file_ext":"py","file_size_in_byte":4384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"573573641","text":"import os\nimport configparser\nimport codecs\nimport shutil\nfrom bcoding import bdecode\nfrom datetime import datetime\nimport time\nimport random\n\nprint(datetime.now().strftime('%H:%M:%S') + ' Подготовка окружения')\nconfig = configparser.ConfigParser()\nconfig.sections()\nconfig.read('settings.txt')\nbasedir = config['Directories']['basedir']\nresumefile = config['Directories']['resumefile']\nif not os.path.exists(basedir + 'random'):\n os.makedirs(basedir + 'random')\nos.chdir(basedir + 'random')\nhashlabel = {}\nfor root, dirs, files in os.walk(basedir + 'random', topdown=False):\n for name in files:\n os.remove(os.path.join(root, name))\n for name in dirs:\n os.rmdir(os.path.join(root, name))\nshottime = \"%.20f\" % time.time()\nshutil.copy(resumefile, resumefile + '.' + shottime)\nresumefile += '.' 
+ shottime\nprint(datetime.now().strftime('%H:%M:%S') + ' Чтение файла bittorrent, составление первичного массива')\nbitfile = open(resumefile, 'rb')\ntorrent = bdecode(bitfile)\nfor key, value in torrent.items():\n if key != '.fileguard' and key != 'rec':\n path = value.get(u'path')\n labels = value.get(u'labels')\n if not labels:\n value['labels'].append('Empty')\n for label in labels:\n if label in hashlabel:\n hashlabel[label].append(path)\n else:\n hashlabel[label] = [path]\n\ndel torrent\nbitfile.close()\nprint(datetime.now().strftime('%H:%M:%S') + ' Обработка полученных данных, составление списка файлов, запись плейлистов')\nfor key, value in hashlabel.items():\n listfiles = set()\n listfiles.clear()\n for path in value:\n if os.path.isfile(path):\n if any([path.endswith(y) for y in ('.wmv', '.mkv', '.mp4', '.avi', '.asf', '.mpg', '.mpeg', '.flv', 'm2ts', 'mov', '.webm')]):\n listfiles.add(path)\n else:\n for root, dirs, files in os.walk(path):\n for name in files:\n if any([name.endswith(y) for y in ('.wmv', '.mkv', '.mp4', '.avi', '.asf', '.mpg', '.mpeg', '.flv', 'm2ts', 'mov', '.webm')]):\n listfiles.add(os.path.join(root, name))\n count = 0\n if listfiles:\n mpcfile = codecs.open(key + '.mpcpl', 'w', \"utf-8-sig\")\n listfiles = list(listfiles)\n random.shuffle(listfiles)\n for dir in listfiles:\n count += 1\n if count == 1:\n mpcfile.write('MPCPLAYLIST\\n')\n mpcfile.write(str(count) + ',type,0\\n')\n mpcfile.write(str(count) + ',filename,' + dir + '\\n')\n mpcfile.close()\n\nos.remove(resumefile)\nprint(datetime.now().strftime('%H:%M:%S') + ' Выполнено')\ntime.sleep(10)\n","sub_path":"mpcplcreate_uniq_sorted_by_random.py","file_name":"mpcplcreate_uniq_sorted_by_random.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"547843622","text":"from django.db import models\nfrom django_comments.models import Comment\nfrom mptt.models import MPTTModel, TreeForeignKey\nfrom django.contrib.auth.models import User\n\nclass Video(models.Model):\n title = models.CharField(max_length=500)\n author = models.CharField(max_length=500, default=\"noname\")\n actors = models.CharField(max_length=500, default=\"noname\")\n categories = models.CharField(max_length=500, default = 'none', null=True)\n image_name = models.CharField(max_length=500, default = '1')\n video_name = models.CharField(max_length=500, default='1')\n likes = models.PositiveIntegerField(default=0)\n dislikes = models.PositiveIntegerField(default=0)\n views = models.PositiveIntegerField(default=0)\n\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n\n def __unicode__():\n return self.title\n\n def __str__(self):\n return self.title\n\nclass UserProfile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n likedVideos = models.TextField(default = \"None\")\n dislikedVideos = models.TextField(default = \"None\")\n def __str__(self):\n return self.user.username\n\nclass MPTTComment(MPTTModel, Comment):\n parent = TreeForeignKey('self', null=True, blank=True, related_name=\"children\", on_delete=models.CASCADE)\n object_id = models.PositiveIntegerField(default=0)\n created_at = models.DateTimeField(auto_now=True)\n class MPTTMeta:\n order_insertion_by=['submit_date']\n\n class Meta:\n ordering=['tree_id', 'lft']\n\n class MPTTMeta:\n order_insertion_by = 
['-created_at']","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"640674215","text":"from flask import Flask, render_template,redirect, url_for, request,g\nimport time\n\nfrom readJson import searchQuery\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n # search = request.args.get('inputButton')\n \n # return redirect(url_for('index.html', name = search))\n return render_template('index.html')\n\n@app.route('/search',methods=[\"GET\",\"POST\"])\ndef search(it=None):\n search_time = time.time()\n name = request.args.get('inputButton')\n a = searchQuery(name)\n search_time = time.time() - search_time\n print(search_time)\n return render_template('search.html',it= a)\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"webflask/dulich.py","file_name":"dulich.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"398964193","text":"# ASSIGNMENT-----------------------------------------\n# REQUIRED BELOW-------------------------------------\n\nfrom flask_sqlalchemy import SQLAlchemy\n\nDEFAULT_PET_IMAGE = 'https://mylostpetalert.com/wp-content/themes/mlpa-child/images/nophoto.gif'\n\ndb = SQLAlchemy()\n\n# REQUIRED ABOVE-------------------------------------\n\n\nclass Pet(db.Model):\n \"\"\"Pet Model\"\"\"\n\n __tablename__ = \"pets\"\n\n id = db.Column(db.Integer, primary_key=True) \n name = db.Column(db.Text,nullable=False) \n species = db.Column(db.Text,nullable=False) \n photo_url = db.Column(db.Text)\n age = db.Column(db.Integer)\n notes = db.Column(db.Text)\n available = db.Column(db.Boolean, nullable=False, default=True)\n\n def image_url(self):\n \"\"\" Return image for a pet \"\"\"\n\n return self.photo_url or DEFAULT_PET_IMAGE\n\ndef connect_db(app): # // why is this at the bottom of this assignment was at the top of others.\n db.app = app\n db.init_app(app)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"598347247","text":"import numpy as np\nfrom torch.utils import data\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import Dataset\nfrom glob import glob\nfrom PIL import Image\nimport os\nimport utils\nimport folder\n\n\ndef get_gsv_dataloader(case, batch_size):\n print('[INFO] Loading datasets: {}'.format(case))\n\n datas = {\n 'GSV': \"/data/dataset/FGDA/semi/nocrop/train_full/\",\n 'Web': \"/data/dataset/FGDA/semi/gsv_100k_unwarp/\",\n 'Webtest': \"/data/dataset/FGDA/semi/gsv_100k_unwarp/test/\",\n }\n means = {\n 'imagenet': [0.485, 0.456, 0.406]\n }\n stds = {\n 'imagenet': [0.229, 0.224, 0.225]\n }\n\n config = {\n 'is_semi': is_semi,\n 'mode' : mode,\n 'is_train' : is_train,\n 'case' : case,\n 'list' : list\n }\n\n img_size = (227, 227)\n\n transform = [\n transforms.Scale(img_size),\n transforms.ToTensor(),\n transforms.Normalize(means['imagenet'], stds['imagenet']),\n ]\n\n data_loader = data.DataLoader(\n dataset=folder.ImageFolder(\n datas[case],\n transform=transforms.Compose(transform),\n ),\n num_workers=16,\n batch_size=batch_size,\n shuffle=True,\n drop_last=True\n )\n\n return 
data_loader\n","sub_path":"Source_Codes/ResNet/GSV/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"465520993","text":"# Default Imports\nfrom greyatomlib.python_getting_started.q01_read_data.build import read_data\ndata = read_data()\n\n\n# Your Solution\ndef BC_runs(data):\n\n runs = 0\n first_inning = data['innings'][0]['1st innings']['deliveries']\n for deliveries in first_inning:\n for delivery in deliveries:\n batsman_name = deliveries[delivery]['batsman']\n if batsman_name == 'BB McCullum':\n runs = runs + deliveries[delivery]['runs']['batsman']\n\n break\n return(runs)\n \n","sub_path":"q05_runs/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"457561545","text":"from tkinter import *\n\n# Por convensión se llama raíz\n# el mainloop es para ejecutar las interfaces gráficas, es un loop infinito que pemite mostrar las ventanas.\nraiz= Tk()\nraiz.title(\"Ventana de pruebas\")\n\n# Pemite que la ventana no se puede ajustar\n# Los parametros son valores lógicos, sirven tanto el 1,0 como el True,False\n#raiz.resizable(0,0)\n# Permite cambiar el icono de tkinter por defecto de nuestro programa\n#raiz.iconbitmap(\"nombreImagen.co\")\n\n# Permite camibar el tamaño de la ventana\n# La raíz se ajustará automaticamente al tamaño del frame que contiene.\n# raiz.geometry(\"650x350\")\n\n# El config permite cambiar muchas cosas, entre ellas, el color de fondo\nraiz.config(bg=\"gray\", bd=10, relief=\"sunken\", cursor=\"pirate\")\n\n# Creamos el Frame y debemos empaquetarlo en nuestra raíz\nmiFrame = Frame()\n\n# Si le colocamos el argumento side=\"\" se anclará donde le digamos\n# anchor permite ubicar el frame donde queramos porque maneja puntos cardinales\n#miFrame.pack(side=\"left\", anchor=\"s\")\n\n# fill permite rellenar la coordenada especifica\n#miFrame.pack(fill=\"x\")\n# En el caso de Y se debe hacer lo siguiente\nmiFrame.pack(fill=\"y\",expand=True)\n\n# Para expandir ambos se necesita el both y el expand\n#miFrame.pack(fill=\"both\",expand=True)\n\n# Ahora vamos a modificar el frame\n# Primero se debe darle un tamaño y la raíz debe ajustar a ese tamaño automaticamente.\n# El relief sirve para cambiar las características del borde\n# También, debemos colocar bd para cambiar el ancho de los bordes porque por defecto es 0\n# También podemos cambiar el cursor cuando éste pasa por el Frame con cursor\nmiFrame.config(bg=\"Red\",width=650,height=350, bd=10, relief=\"sunken\", cursor=\"hand2\")\n\nraiz.mainloop()\n","sub_path":"interfaz_gráfica/primera_interfaz.py","file_name":"primera_interfaz.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"517127942","text":"\"\"\"Step decorators.\n\nExample:\n\n@given('I have an article')\ndef article(author):\n return create_test_article(author=author)\n\n\n@when('I go to the article page')\ndef go_to_the_article_page(browser, article):\n browser.visit(urljoin(browser.url, '/articles/{0}/'.format(article.id)))\n\n\n@then('I should not see the error message')\ndef no_error_message(browser):\n with pytest.raises(ElementDoesNotExist):\n browser.find_by_css('.message.error').first\n\n\nMultiple names for the steps:\n\n@given('I have an article')\n@given('there is an article')\ndef 
article(author):\n return create_test_article(author=author)\n\n\nReusing existing fixtures for a different step name:\n\ngiven('I have a beautiful article', fixture='article')\n\n\"\"\"\nimport inspect\nimport sys\n\nimport pytest\n\nfrom pytest_bdd.feature import remove_prefix\nfrom pytest_bdd.types import GIVEN, WHEN, THEN\n\n\nclass StepError(Exception):\n pass\n\n\ndef given(name, fixture=None):\n \"\"\"Given step decorator.\n\n :param name: Given step name.\n :param fixture: Optional name of the fixture to reuse.\n\n :raises: StepError in case of wrong configuration.\n :note: Can't be used as a decorator when the fixture is specified.\n \"\"\"\n name = remove_prefix(name)\n if fixture is not None:\n module = get_caller_module()\n func = getattr(module, fixture, lambda request: request.getfuncargvalue(fixture))\n setattr(module, name, pytest.fixture(lambda: func))\n return _not_a_fixture_decorator\n\n return _step_decorator(GIVEN, name)\n\n\ndef when(name):\n \"\"\"When step decorator.\n\n :param name: Step name.\n :raises: StepError in case of wrong configuration.\n \"\"\"\n return _step_decorator(WHEN, name)\n\n\ndef then(name):\n \"\"\"Then step decorator.\n\n :param name: Step name.\n :raises: StepError in case of wrong configuration.\n \"\"\"\n return _step_decorator(THEN, name)\n\n\ndef _not_a_fixture_decorator(func):\n \"\"\"Function that prevents the decoration.\n\n :param func: Function that is going to be decorated.\n :raises: `StepError` if was used as a decorator.\n \"\"\"\n raise StepError('Cannot be used as a decorator when the fixture is specified')\n\n\ndef _step_decorator(step_type, step_name):\n \"\"\"Step decorator for the type and the name.\n :param step_type: Step type (GIVEN, WHEN or THEN).\n :param step_name: Step name as in the feature file.\n\n :return: Decorator function for the step.\n\n :note: If the step type is GIVEN it will automatically apply the pytest\n fixture decorator to the step function.\n \"\"\"\n step_name = remove_prefix(step_name)\n\n def decorator(func):\n step_func = func\n if step_type == GIVEN:\n if not hasattr(func, '_pytestfixturefunction'):\n # avoid overfixturing of a fixture\n func = pytest.fixture(func)\n step_func = lambda request: request.getfuncargvalue(func.func_name)\n\n step_func.__name__ = step_name\n setattr(get_caller_module(), step_name, pytest.fixture(lambda: step_func))\n return func\n\n return decorator\n\n\ndef get_caller_module(depth=2):\n \"\"\"Return the module of the caller.\"\"\"\n frame = sys._getframe(depth)\n return inspect.getmodule(frame)\n","sub_path":"pytest_bdd/steps.py","file_name":"steps.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"490483204","text":"# -*- coding: utf-8 -*-\nfrom account.models import Teacher, Dean, TeachingSecretary\n\n\ndef user_detail_info(user):\n if user.position == 1:\n user_info = Teacher.objects.get(teacher_info=user)\n user.user_name = user_info.teacher_name\n elif user.position == 2:\n user_info = Dean.objects.get(dean_info=user)\n user.user_name = user_info.dean_name\n elif user.position == 3:\n user_info = TeachingSecretary.objects.get(teaching_secretary_info=user)\n user.user_name = user_info.teaching_secretary_name\n else:\n user.user_name = user.username\n\n user.university = user_info.university\n user.school = user_info.school\n user.major = user_info.major\n return 
user\n","sub_path":"Jczd_sys/Jczd_Project/common/helper/user_detail_info.py","file_name":"user_detail_info.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"198064592","text":"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Convolutional-recurrent layers.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.keras._impl.keras import activations\nfrom tensorflow.python.keras._impl.keras import backend as K\nfrom tensorflow.python.keras._impl.keras import constraints\nfrom tensorflow.python.keras._impl.keras import initializers\nfrom tensorflow.python.keras._impl.keras import regularizers\nfrom tensorflow.python.keras._impl.keras.engine import InputSpec\nfrom tensorflow.python.keras._impl.keras.layers.recurrent import Recurrent\nfrom tensorflow.python.keras._impl.keras.utils import conv_utils\n\n\nclass ConvRecurrent2D(Recurrent):\n \"\"\"Abstract base class for convolutional recurrent layers.\n\n Do not use in a model -- it's not a functional layer!\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. the number output of filters in the convolution).\n kernel_size: An integer or tuple/list of n integers, specifying the\n dimensions of the convolution window.\n strides: An integer or tuple/list of n integers,\n specifying the strides of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, time, ..., channels)`\n while `channels_first` corresponds to\n inputs with shape `(batch, time, channels, ...)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n dilation_rate: An integer or tuple/list of n integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n return_sequences: Boolean. Whether to return the last output\n in the output sequence, or the full sequence.\n go_backwards: Boolean (default False).\n If True, rocess the input sequence backwards.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n\n Input shape:\n 5D tensor with shape `(num_samples, timesteps, channels, rows, cols)`.\n\n Output shape:\n - if `return_sequences`: 5D tensor with shape\n `(num_samples, timesteps, channels, rows, cols)`.\n - else, 4D tensor with shape `(num_samples, channels, rows, cols)`.\n\n # Masking\n This layer supports masking for input data with a variable number\n of timesteps. To introduce masks to your data,\n use an `Embedding` layer with the `mask_zero` parameter\n set to `True`.\n **Note:** for the time being, masking is only supported with Theano.\n\n # Note on using statefulness in RNNs\n You can set RNN layers to be 'stateful', which means that the states\n computed for the samples in one batch will be reused as initial states\n for the samples in the next batch.\n This assumes a one-to-one mapping between\n samples in different successive batches.\n\n To enable statefulness:\n - specify `stateful=True` in the layer constructor.\n - specify a fixed batch size for your model, by passing\n a `batch_input_size=(...)` to the first layer in your model.\n This is the expected shape of your inputs *including the batch\n size*.\n It should be a tuple of integers, e.g. `(32, 10, 100)`.\n\n To reset the states of your model, call `.reset_states()` on either\n a specific layer, or on your entire model.\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1),\n return_sequences=False,\n go_backwards=False,\n stateful=False,\n **kwargs):\n super(ConvRecurrent2D, self).__init__(**kwargs)\n self.filters = filters\n self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')\n self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')\n self.padding = conv_utils.normalize_padding(padding)\n self.data_format = conv_utils.normalize_data_format(data_format)\n self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,\n 'dilation_rate')\n self.return_sequences = return_sequences\n self.go_backwards = go_backwards\n self.stateful = stateful\n self.input_spec = [InputSpec(ndim=5)]\n self.state_spec = None\n\n def _compute_output_shape(self, input_shape):\n if isinstance(input_shape, list):\n input_shape = input_shape[0]\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n if self.data_format == 'channels_first':\n rows = input_shape[3]\n cols = input_shape[4]\n elif self.data_format == 'channels_last':\n rows = input_shape[2]\n cols = input_shape[3]\n rows = conv_utils.conv_output_length(\n rows,\n self.kernel_size[0],\n padding=self.padding,\n stride=self.strides[0],\n dilation=self.dilation_rate[0])\n cols = conv_utils.conv_output_length(\n cols,\n self.kernel_size[1],\n padding=self.padding,\n stride=self.strides[1],\n dilation=self.dilation_rate[1])\n if self.return_sequences:\n if self.data_format == 'channels_first':\n output_shape = [input_shape[0], input_shape[1],\n self.filters, rows, cols]\n elif self.data_format == 'channels_last':\n output_shape = [input_shape[0], input_shape[1],\n rows, cols, self.filters]\n else:\n if self.data_format == 'channels_first':\n output_shape = [input_shape[0], self.filters, rows, cols]\n elif self.data_format == 'channels_last':\n output_shape = [input_shape[0], rows, cols, self.filters]\n\n if self.return_state:\n if self.data_format == 'channels_first':\n output_shapes = [output_shape] + 
[(input_shape[0],\n self.filters,\n rows,\n cols) for _ in range(2)]\n elif self.data_format == 'channels_last':\n output_shapes = [output_shape] + [(input_shape[0],\n rows,\n cols,\n self.filters) for _ in range(2)]\n return [tensor_shape.TensorShape(shape) for shape in output_shapes]\n return tensor_shape.TensorShape(output_shape)\n\n def get_config(self):\n config = {\n 'filters': self.filters,\n 'kernel_size': self.kernel_size,\n 'strides': self.strides,\n 'padding': self.padding,\n 'data_format': self.data_format,\n 'dilation_rate': self.dilation_rate,\n 'return_sequences': self.return_sequences,\n 'go_backwards': self.go_backwards,\n 'stateful': self.stateful\n }\n base_config = super(ConvRecurrent2D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass ConvLSTM2D(ConvRecurrent2D):\n \"\"\"Convolutional LSTM.\n\n It is similar to an LSTM layer, but the input transformations\n and recurrent transformations are both convolutional.\n\n Arguments:\n filters: Integer, the dimensionality of the output space\n (i.e. the number output of filters in the convolution).\n kernel_size: An integer or tuple/list of n integers, specifying the\n dimensions of the convolution window.\n strides: An integer or tuple/list of n integers,\n specifying the strides of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, time, ..., channels)`\n while `channels_first` corresponds to\n inputs with shape `(batch, time, channels, ...)`.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n dilation_rate: An integer or tuple/list of n integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step.\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs..\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state..\n bias_initializer: Initializer for the bias vector.\n unit_forget_bias: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Use in combination with `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et\n al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\")..\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n return_sequences: Boolean. Whether to return the last output\n in the output sequence, or the full sequence.\n go_backwards: Boolean (default False).\n If True, rocess the input sequence backwards.\n stateful: Boolean (default False). If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n\n Input shape:\n - if data_format='channels_first'\n 5D tensor with shape:\n `(samples,time, channels, rows, cols)`\n - if data_format='channels_last'\n 5D tensor with shape:\n `(samples,time, rows, cols, channels)`\n\n Output shape:\n - if `return_sequences`\n - if data_format='channels_first'\n 5D tensor with shape:\n `(samples, time, filters, output_row, output_col)`\n - if data_format='channels_last'\n 5D tensor with shape:\n `(samples, time, output_row, output_col, filters)`\n - else\n - if data_format ='channels_first'\n 4D tensor with shape:\n `(samples, filters, output_row, output_col)`\n - if data_format='channels_last'\n 4D tensor with shape:\n `(samples, output_row, output_col, filters)`\n where o_row and o_col depend on the shape of the filter and\n the padding\n\n Raises:\n ValueError: in case of invalid constructor arguments.\n\n References:\n - [Convolutional LSTM Network: A Machine Learning Approach for\n Precipitation Nowcasting](http://arxiv.org/abs/1506.04214v1)\n The current implementation does not include the feedback loop on the\n cells output\n \"\"\"\n\n def __init__(self,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='valid',\n data_format=None,\n dilation_rate=(1, 1),\n activation='tanh',\n recurrent_activation='hard_sigmoid',\n use_bias=True,\n kernel_initializer='glorot_uniform',\n recurrent_initializer='orthogonal',\n bias_initializer='zeros',\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n 
recurrent_constraint=None,\n bias_constraint=None,\n return_sequences=False,\n go_backwards=False,\n stateful=False,\n dropout=0.,\n recurrent_dropout=0.,\n **kwargs):\n super(ConvLSTM2D, self).__init__(\n filters,\n kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n return_sequences=return_sequences,\n go_backwards=go_backwards,\n stateful=stateful,\n activity_regularizer=regularizers.get(activity_regularizer),\n **kwargs)\n self.activation = activations.get(activation)\n self.recurrent_activation = activations.get(recurrent_activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.unit_forget_bias = unit_forget_bias\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n self.dropout = min(1., max(0., dropout))\n self.recurrent_dropout = min(1., max(0., recurrent_dropout))\n self.state_spec = [InputSpec(ndim=4), InputSpec(ndim=4)]\n\n def build(self, input_shape):\n if isinstance(input_shape, list):\n input_shape = input_shape[0]\n input_shape = tuple(tensor_shape.TensorShape(input_shape).as_list())\n batch_size = input_shape[0] if self.stateful else None\n self.input_spec[0] = InputSpec(shape=(batch_size, None) + input_shape[2:])\n\n if self.stateful:\n self.reset_states()\n else:\n # initial states: 2 all-zero tensor of shape (filters)\n self.states = [None, None]\n\n if self.data_format == 'channels_first':\n channel_axis = 2\n else:\n channel_axis = -1\n if input_shape[channel_axis] is None:\n raise ValueError('The channel dimension of the inputs '\n 'should be defined. 
Found `None`.')\n input_dim = input_shape[channel_axis]\n state_shape = [None] * 4\n state_shape[channel_axis] = input_dim\n state_shape = tuple(state_shape)\n self.state_spec = [\n InputSpec(shape=state_shape),\n InputSpec(shape=state_shape)\n ]\n kernel_shape = self.kernel_size + (input_dim, self.filters * 4)\n self.kernel_shape = kernel_shape\n recurrent_kernel_shape = self.kernel_size + (self.filters, self.filters * 4)\n\n self.kernel = self.add_weight(\n shape=kernel_shape,\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n self.recurrent_kernel = self.add_weight(\n shape=recurrent_kernel_shape,\n initializer=self.recurrent_initializer,\n name='recurrent_kernel',\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint)\n if self.use_bias:\n self.bias = self.add_weight(\n shape=(self.filters * 4,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n if self.unit_forget_bias:\n bias_value = np.zeros((self.filters * 4,))\n bias_value[self.filters:self.filters * 2] = 1.\n K.set_value(self.bias, bias_value)\n else:\n self.bias = None\n\n self.kernel_i = self.kernel[:, :, :, :self.filters]\n self.recurrent_kernel_i = self.recurrent_kernel[:, :, :, :self.filters]\n self.kernel_f = self.kernel[:, :, :, self.filters:self.filters * 2]\n self.recurrent_kernel_f = self.recurrent_kernel[:, :, :, self.filters:\n self.filters * 2]\n self.kernel_c = self.kernel[:, :, :, self.filters * 2:self.filters * 3]\n self.recurrent_kernel_c = self.recurrent_kernel[:, :, :, self.filters * 2:\n self.filters * 3]\n self.kernel_o = self.kernel[:, :, :, self.filters * 3:]\n self.recurrent_kernel_o = self.recurrent_kernel[:, :, :, self.filters * 3:]\n\n if self.use_bias:\n self.bias_i = self.bias[:self.filters]\n self.bias_f = self.bias[self.filters:self.filters * 2]\n self.bias_c = self.bias[self.filters * 2:self.filters * 3]\n self.bias_o = self.bias[self.filters * 3:]\n else:\n self.bias_i = None\n self.bias_f = None\n self.bias_c = None\n self.bias_o = None\n self.built = True\n\n def get_initial_state(self, inputs):\n # (samples, timesteps, rows, cols, filters)\n initial_state = K.zeros_like(inputs)\n # (samples, rows, cols, filters)\n initial_state = K.sum(initial_state, axis=1)\n shape = list(self.kernel_shape)\n shape[-1] = self.filters\n initial_state = self.input_conv(\n initial_state, K.zeros(tuple(shape)), padding=self.padding)\n\n initial_states = [initial_state for _ in range(2)]\n return initial_states\n\n def reset_states(self):\n if not self.stateful:\n raise RuntimeError('Layer must be stateful.')\n input_shape = self.input_spec[0].shape\n\n if not input_shape[0]:\n raise ValueError('If a RNN is stateful, a complete '\n 'input_shape must be provided '\n '(including batch size). 
'\n 'Got input shape: ' + str(input_shape))\n\n if self.return_state:\n output_shape = tuple(self._compute_output_shape(input_shape)[0].as_list())\n else:\n output_shape = tuple(self._compute_output_shape(input_shape).as_list())\n if self.return_sequences:\n output_shape = (input_shape[0],) + output_shape[2:]\n else:\n output_shape = (input_shape[0],) + output_shape[1:]\n\n if hasattr(self, 'states'):\n K.set_value(self.states[0],\n np.zeros(output_shape))\n K.set_value(self.states[1],\n np.zeros(output_shape))\n else:\n self.states = [\n K.zeros(output_shape),\n K.zeros(output_shape)\n ]\n\n def get_constants(self, inputs, training=None):\n constants = []\n if self.implementation == 0 and 0 < self.dropout < 1:\n ones = K.zeros_like(inputs)\n ones = K.sum(ones, axis=1)\n ones += 1\n\n def dropped_inputs():\n return K.dropout(ones, self.dropout)\n\n dp_mask = [\n K.in_train_phase(dropped_inputs, ones, training=training)\n for _ in range(4)\n ]\n constants.append(dp_mask)\n else:\n constants.append([K.cast_to_floatx(1.) for _ in range(4)])\n\n if 0 < self.recurrent_dropout < 1:\n shape = list(self.kernel_shape)\n shape[-1] = self.filters\n ones = K.zeros_like(inputs)\n ones = K.sum(ones, axis=1)\n ones = self.input_conv(ones, K.zeros(shape), padding=self.padding)\n ones += 1.\n\n def dropped_inputs(): # pylint: disable=function-redefined\n return K.dropout(ones, self.recurrent_dropout)\n\n rec_dp_mask = [\n K.in_train_phase(dropped_inputs, ones, training=training)\n for _ in range(4)\n ]\n constants.append(rec_dp_mask)\n else:\n constants.append([K.cast_to_floatx(1.) for _ in range(4)])\n return constants\n\n def input_conv(self, x, w, b=None, padding='valid'):\n conv_out = K.conv2d(\n x,\n w,\n strides=self.strides,\n padding=padding,\n data_format=self.data_format,\n dilation_rate=self.dilation_rate)\n if b is not None:\n conv_out = K.bias_add(conv_out, b, data_format=self.data_format)\n return conv_out\n\n def reccurent_conv(self, x, w):\n conv_out = K.conv2d(\n x, w, strides=(1, 1), padding='same', data_format=self.data_format)\n return conv_out\n\n def step(self, inputs, states):\n assert len(states) == 4\n h_tm1 = states[0]\n c_tm1 = states[1]\n dp_mask = states[2]\n rec_dp_mask = states[3]\n\n x_i = self.input_conv(\n inputs * dp_mask[0], self.kernel_i, self.bias_i, padding=self.padding)\n x_f = self.input_conv(\n inputs * dp_mask[1], self.kernel_f, self.bias_f, padding=self.padding)\n x_c = self.input_conv(\n inputs * dp_mask[2], self.kernel_c, self.bias_c, padding=self.padding)\n x_o = self.input_conv(\n inputs * dp_mask[3], self.kernel_o, self.bias_o, padding=self.padding)\n h_i = self.reccurent_conv(h_tm1 * rec_dp_mask[0], self.recurrent_kernel_i)\n h_f = self.reccurent_conv(h_tm1 * rec_dp_mask[1], self.recurrent_kernel_f)\n h_c = self.reccurent_conv(h_tm1 * rec_dp_mask[2], self.recurrent_kernel_c)\n h_o = self.reccurent_conv(h_tm1 * rec_dp_mask[3], self.recurrent_kernel_o)\n\n i = self.recurrent_activation(x_i + h_i)\n f = self.recurrent_activation(x_f + h_f)\n c = f * c_tm1 + i * self.activation(x_c + h_c)\n o = self.recurrent_activation(x_o + h_o)\n h = o * self.activation(c)\n return h, [h, c]\n\n def get_config(self):\n config = {\n 'activation':\n activations.serialize(self.activation),\n 'recurrent_activation':\n activations.serialize(self.recurrent_activation),\n 'use_bias':\n self.use_bias,\n 'kernel_initializer':\n initializers.serialize(self.kernel_initializer),\n 'recurrent_initializer':\n initializers.serialize(self.recurrent_initializer),\n 'bias_initializer':\n 
initializers.serialize(self.bias_initializer),\n 'unit_forget_bias':\n self.unit_forget_bias,\n 'kernel_regularizer':\n regularizers.serialize(self.kernel_regularizer),\n 'recurrent_regularizer':\n regularizers.serialize(self.recurrent_regularizer),\n 'bias_regularizer':\n regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer':\n regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint':\n constraints.serialize(self.kernel_constraint),\n 'recurrent_constraint':\n constraints.serialize(self.recurrent_constraint),\n 'bias_constraint':\n constraints.serialize(self.bias_constraint),\n 'dropout':\n self.dropout,\n 'recurrent_dropout':\n self.recurrent_dropout\n }\n base_config = super(ConvLSTM2D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n","sub_path":"Tensorflow_Pandas_Numpy/source3.6/tensorflow/python/keras/_impl/keras/layers/convolutional_recurrent.py","file_name":"convolutional_recurrent.py","file_ext":"py","file_size_in_byte":24922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"11325063","text":"\"\"\"Coin-flipping Tourney\"\"\"\n\n__author__ = \"730394272\"\n\nfrom random import randint\nwinemoji: str = \"\\U00002728\"\nlemoji: str = \"\\U0001F625\"\nplayer: str = input(\"Enter player name: \")\npoints: int = 0\n\n\ndef greet() -> None:\n \"\"\"Greets the player\"\"\"\n print(f\"Welcome {player} to the ultimate coin flipping tournament. Can you reach 10 correct guesses in a row?\")\n\n\ndef toss(points) -> int:\n coin: int = randint(1, 2)\n flip: str = input(\"Heads or tails or quit? \")\n if flip == \"Quit\":\n points = quit(points)\n points = -1\n else: \n if coin == 1 and flip != \"Quit\":\n print(\"The coin lands on....HEADS\")\n else:\n if coin == 2 and flip != \"Quit\":\n print(\"The coin lands on....TAILS\")\n if coin == 1 and flip == \"Heads\":\n print(f\"{winemoji}{winemoji}You win!!!!{winemoji}{winemoji}\")\n points = points + 1\n print(f\"Score: {points}\")\n else:\n if coin == 1 and flip == \"Tails\":\n print(f\"Sorry {player}, you lose. {lemoji}\")\n print(f\"Score: {points}\")\n points = -1\n if coin == 2 and flip == \"Tails\":\n print(f\"{winemoji}{winemoji}You win!!!!{winemoji}{winemoji}\")\n points = points + 1\n print(f\"Score: {points}\")\n else:\n if coin == 2 and flip == \"Heads\":\n print(f\"Sorry {player}, you lose. {lemoji}\")\n print(f\"Score: {points}\")\n points = -1\n return points\n \n \ndef main() -> None:\n \"\"\"Main\"\"\"\n points: int = 0\n while points == 0:\n greet()\n quit(points)\n while points >= 0 and points < 10: \n points = toss(points)\n\n\ndef quit(points) -> int:\n flip: str = input(\"Heads or tails or quit? \")\n if flip == \"Quit\":\n print(f\"Goodbye {player}! 
Here is your score: {points}\")\n points = -1\n return points\n\n\nmain()","sub_path":"projects/cyoa.py","file_name":"cyoa.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"619125047","text":"from django.views.generic import DetailView, View\nfrom django.http.response import JsonResponse\nfrom django.views.generic.edit import FormMixin\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.conf import settings\nimport shlex\nimport os\nimport requests\nfrom subprocess import Popen\nfrom hypertube.utils import build_query_tmdb\nfrom hypertube.subtitles import get_subtitle, SUBTITLES_PATH\nfrom home.models import Movie, Comments\nfrom users.models import ExtendedUserModel\n\nfrom .forms import NewCommentForm\n\nprocess = list()\n\n\nclass StreamingView(DetailView, FormMixin):\n template_name = 'movie.html'\n model = Movie\n context_object_name = 'movie'\n form_class = NewCommentForm\n\n def get(self, request, *args, **kwargs):\n self.object = self.get_object()\n ext_user = ExtendedUserModel.objects.get(user=self.request.user)\n self.ext_user = ext_user\n if self.object not in ext_user.already_seen.all():\n ext_user.already_seen.add(self.object)\n FNULL = open(os.devnull, 'w')\n cmd = 'python3.7 {}/netflux_service/netflux-cli.py \"{}\" {}'.format(settings.PROJECT_DIR, self.object.torrents, request.user.username)\n p = Popen(shlex.split(cmd), stdout=FNULL, stderr=FNULL)\n process.append((request.user.username, p))\n return super(StreamingView, self).get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n self.object = self.get_object()\n form = self.get_form()\n if form.is_valid():\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n def get_context_data(self, **kwargs):\n context = super(StreamingView, self).get_context_data(**kwargs)\n query = build_query_tmdb(\"find/{}\". format(self.object.imdb_code)) + \"&external_source=imdb_id\"\n id = requests.get(query).json()['movie_results'][0]['id']\n query = requests.get(build_query_tmdb(\"movie/{}/credits\". 
format(id))).json()\n context['casting'] = query['cast'][:10]\n context['crew'] = [x for x in query['crew'] if x['department'] in ('Production', 'Directing')][:6]\n context['form'] = NewCommentForm\n if not os.path.exists(SUBTITLES_PATH + \"{}/{}_en.vtt\".format(self.object.imdb_code, self.object.imdb_code)):\n lang = ['English']\n tracks = []\n if self.ext_user.default_lang == 'fr':\n lang.append('French')\n ret = get_subtitle(self.object.imdb_code, languages=lang)\n if 'English' in ret:\n tracks.append(\"\".format(self.object.imdb_code, self.object.imdb_code))\n if self.ext_user.default_lang == 'fr' and 'French' in ret:\n tracks.append(\"\".format(self.object.imdb_code, self.object.imdb_code))\n context['tracks'] = \"\".join(tracks)\n else:\n context['tracks'] = ''\n return context\n\n def form_valid(self, form):\n c = Comments(slug_movie=self.object.slug, author=self.request.user.username, content=form.cleaned_data['content'])\n c.save()\n self.object.comments.add(c)\n return redirect(reverse('movie', args=[self.object.slug]))\n\n\nclass SetPortView(View):\n def get(self, request, *args, **kwargs):\n port = request.GET.get('port', None)\n username = request.GET.get('username', None)\n f_type = request.GET.get('type', None)\n if port and username:\n ExtendedUserModel.objects.filter(user__username=username).update(port=port, type=f_type)\n return JsonResponse({})\n\n\nclass GetPortView(View):\n def get(self, request, *args, **kwargs):\n username = request.GET.get('username', None)\n if username:\n ext_user = ExtendedUserModel.objects.filter(user__username=username)\n if len(ext_user) == 0:\n return JsonResponse({})\n else:\n ext_user = ext_user[0]\n else:\n return JsonResponse({})\n if ext_user.port:\n return JsonResponse({'port': ext_user.port, 'type': ext_user.type})\n else:\n return JsonResponse({})\n","sub_path":"apps/streaming/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"168679458","text":"# -*- coding: utf-8 -*-\n# Source: https://raw.github.com/GoogleCloudPlatform/appengine-sharded-counters-python/c0641cf7f64288fb819ccf996533a1ab4aa53106/general_counter.py\n\n# Copyright 2008 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# Some modifications by adayoung\n\n\"\"\"A module implementing a general sharded counter.\"\"\"\n\n\nimport random\n\nfrom google.appengine.api import memcache\nfrom google.appengine.ext import ndb\nfrom datetime import datetime\n\n\nSHARD_KEY_TEMPLATE = 'shard-{}-{:d}'\n\n\nclass GeneralCounterShardConfig(ndb.Model):\n\t\"\"\"Tracks the number of shards for each named counter.\"\"\"\n\tnum_shards = ndb.IntegerProperty(default=20)\n\n\t@classmethod\n\tdef all_keys(cls, name):\n\t\t\"\"\"Returns all possible keys for the counter name given the config.\n\n\t\tArgs:\n\t\t\tname: The name of the counter.\n\n\t\tReturns:\n\t\t\tThe full list of ndb.Key values corresponding to all the possible\n\t\t\t\tcounter shards 
that could exist.\n\t\t\"\"\"\n\t\tconfig = cls.get_or_insert(name)\n\t\tshard_key_strings = [SHARD_KEY_TEMPLATE.format(name, index)\n\t\t\t\t\t\t\t for index in range(config.num_shards)]\n\t\treturn [ndb.Key(GeneralCounterShard, shard_key_string)\n\t\t\t\tfor shard_key_string in shard_key_strings]\n\n\nclass GeneralCounterShard(ndb.Model):\n\t\"\"\"Shards for each named counter.\"\"\"\n\tcount = ndb.IntegerProperty(default=0)\n\tlast_viewed = ndb.DateTimeProperty(auto_now_add=True)\n\n\ndef get_count(name):\n\t\"\"\"Retrieve the count and last_viewed time for a given sharded counter.\n\n\tArgs:\n\t\tname: The name of the counter.\n\n\tReturns:\n\t\t(Integer, datetime); the cumulative count of all sharded counters for the given\n\t\t\tcounter name along with time.\n\t\"\"\"\n\ttotal = memcache.get(name)\n\tlast = memcache.get(\"%s_time\" % name) or datetime.now()\n\tif total is None:\n\t\ttotal = 0\n\n\t\tall_keys = GeneralCounterShardConfig.all_keys(name)\n\t\tall_shards = ndb.get_multi(all_keys)\n\n\t\tfor counter in all_shards:\n\t\t\tif counter is not None:\n\t\t\t\ttotal += counter.count\n\n\t\tall_times = [i.last_viewed for i in all_shards if i is not None]\n\t\tif len(all_times) != 0:\n\t\t\tall_times.sort()\n\t\t\tlast = all_times[-1]\n\n\t\tmemcache.add(name, total, 60)\n\t\tmemcache.add(\"%s_time\" % name, last, 60)\n\n\treturn (total, last)\n\n\ndef increment(name):\n\t\"\"\"Increment the value for a given sharded counter.\n\n\tArgs:\n\t\tname: The name of the counter.\n\t\"\"\"\n\ttry:\n\t\tconfig = GeneralCounterShardConfig.get_or_insert(name)\n\t\t_increment(name, config.num_shards)\n\texcept:\n\t\tpass # don't bother if we've run out of quota or whatnot\n\n\n@ndb.transactional\ndef _increment(name, num_shards):\n\t\"\"\"Transactional helper to increment the value for a given sharded counter.\n\n\tAlso takes a number of shards to determine which shard will be used.\n\n\tArgs:\n\t\tname: The name of the counter.\n\t\tnum_shards: How many shards to use.\n\t\"\"\"\n\tindex = random.randint(0, num_shards - 1)\n\tshard_key_string = SHARD_KEY_TEMPLATE.format(name, index)\n\tcounter = GeneralCounterShard.get_by_id(shard_key_string)\n\tif counter is None:\n\t\tcounter = GeneralCounterShard(id=shard_key_string)\n\tcounter.count += 1\n\ttry:\n\t\tcounter.put()\n\texcept:\n\t\tpass # don't bother if we've run out of quota or whatnot\n\t# Memcache increment does nothing if the name is not a key in memcache\n\tmemcache.incr(name)\n\n\n@ndb.transactional\ndef increase_shards(name, num_shards):\n\t\"\"\"Increase the number of shards for a given sharded counter.\n\n\tWill never decrease the number of shards.\n\n\tArgs:\n\t\tname: The name of the counter.\n\t\tnum_shards: How many shards to use.\n\t\"\"\"\n\tconfig = GeneralCounterShardConfig.get_or_insert(name)\n\tif config.num_shards < num_shards:\n\t\tconfig.num_shards = num_shards\n\t\tconfig.put()\n","sub_path":"general_counter.py","file_name":"general_counter.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"190252857","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom data.models import *\n\n\nclasses = [Temperature, Pressure, Illuminance,\n Humidity, Voltage, Custom]\n\n\ndef getSensors():\n num = 0\n sensors = []\n for cla in classes:\n cla_dict = {'type': cla.valueName}\n cla_dict.setdefault('items', [])\n for sensor in Sensors.objects.filter(type=cla.__name__):\n if 
sensor.safety_min and sensor.safety_max:\n condition = u'介于 %s 至 %s 之间' % (\n sensor.safety_min, sensor.safety_max)\n elif sensor.safety_min:\n condition = u'大于 %s' % sensor.safety_min\n elif sensor.safety_max:\n condition = u'小于 %s' % sensor.safety_max\n else:\n condition = '---'\n t = Token.objects.get(sensorId=sensor.id)\n sensor_dict = {'name': sensor.name,\n 'id': 'sensor' + str(sensor.id),\n 'safety_min': sensor.safety_min,\n 'safety_max': sensor.safety_max,\n 'safetyCond': condition,\n 'submitUrl': '/api/submit/?id=sensor%s&token=%s&step=10&unit=&val=' % (\n t.sensorId, t.token)\n }\n cla_dict['items'].append(sensor_dict)\n num += 1\n sensors.append(cla_dict)\n if num == 0:\n sensors = []\n return sensors, num\n\n\ndef index(request):\n ctx = {'radarSensors': [],\n 'sensors': [],\n 'cnt': 0}\n for r in RadarChart.objects.all():\n r_dict = {'name': r.sensor.name}\n ctx['radarSensors'].append(r_dict)\n ctx['sensors'], ctx['cnt'] = getSensors()\n return render(request, 'view.html', ctx)\n\n\ndef device(request):\n if not request.user.is_staff:\n return HttpResponse('
请先联系管理员,通过专业用户验证
')\n ctx = {'classes': [],\n 'sensors': []}\n for cla in classes:\n cla_add = {'name': cla.valueName,\n 'value': cla.__name__}\n ctx['classes'].append(cla_add)\n ctx['sensors'] = getSensors()[0]\n return render(request, 'device.html', ctx)\n","sub_path":"view/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"402506566","text":"# -*- coding: utf-8 -*-\nimport json\nimport pandas as pd\nimport plotly.express as px\nimport plotly.graph_objs as go\nimport numpy as np\nfrom ginical import * \ntoken='pk.eyJ1IjoibWluZ3l1YW4yMDIwIiwiYSI6ImNrZjE2OHl6cTBwMWIyeXA3dGlpdDczYjAifQ.uBSZVa4tvUUf5pDxwrOKuw'\n\ndef get_village_info(filename):\n villagelist=pd.read_json(filename)\n return villagelist\ndef get_statistic_data(filename):\n statistics=pd.read_csv(filename)\n return statistics\n\nvillages=get_village_info('./data/villages_allinfo.json')\nstatistics=get_statistic_data('./data/statistic.csv')\nschools=pd.read_json('./data/schools_allinfo.json')\nprint (schools)\n\n\nwith open('./data/maps.json','r') as response:\n districts_map = json.load(response)\n\ndf = pd.read_csv(\"./data/mapinfo.csv\")\nprint (df)\nimport plotly.express as px\n\n#district_fig.show()\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom details import *#info,districtdict\napp = dash.Dash(__name__,meta_tags=[{\"name\":\"viewport\",\"content\":\"width=device-width\"}])\napp.layout = html.Div(\n children=\n [\n html.H2(id='Title',children=\"北京市西城区教育公平与资源配置研究\",style={\"width\":\"100%\",\"text-align\":\"center\",\"height\":\"50px\",\"color\":\"#999\"}),\n html.Div(id='Body',children=\n [\n html.Div(id='Map module',children=\n [\n html.Div(children=\n dcc.RangeSlider(\n id='year',\n min=0,\n max=6,\n value=[0,6],\n marks={str(i):str(i+2020) for i in range(7)},\n step=1\n ),\n style={\"width\":\"80%\",\"margin\":\"auto\",\"height\":\"50px\",\"align-item\":\"center\"}),\n html.Div(children=\n [\n # dcc.Graph(figure=district_fig)\n ],id='Map'),\n html.Div(children=\n dcc.Slider(id='districts',\n min=0,\n max=11,\n value=1,\n marks=districtdict,\n step=None),\n style={\"width\":\"85%\",\"margin\":\"auto\",\"height\":\"50px\",\"align-item\":\"center\"})\n \n ],className='pretty_container eight columns'),\n html.Div(id='left_module',children=[\n html.Div(id='control tabs',children=\n [\n dcc.Tabs(id='tabs',value='what-is',children=\n [\n dcc.Tab(label='介绍',value='what-is',children=\n [\n html.Div(children=\n [\n html.P(info['introduction'],\\\n style={\"text-align\":\"center\",\"height\":\"100%\"})\n ])\n ]),\n dcc.Tab(label='控制',value='control page',id='control page',children=\n [\n html.Div(id='variable control',children=[\n html.Div(children=[\n html.Div(id='transport div',children=\n [\n html.P('适龄儿童上学交通方式',style={\"text-align\":\"center\"}),\n dcc.Dropdown(id='transport',\n options=[{'label':op,'value':op} for op in avaliable_transportation],\n value=avaliable_transportation[0],\n style={\"margin\":\"auto\",\"width\":\"90\",\"padding\":\"10px\"},\n multi=True),\n ],style={\"width\":\"50%\"}),\n html.Div(id='income div',children=\n [\n html.P('可支配收入水平',style={\"text-align\":\"center\"}),\n dcc.Dropdown(id='Income level',\n options=[{'label':op,'value':op} for op in avaliable_incomelevel],\n value=avaliable_incomelevel[0],\n style={\"margin\":\"auto\",\"width\":\"90\",\"padding\":\"10px\"}),\n ],style={\"width\":\"50%\"})\n 
],style={\"display\":\"flex\",'height':\"4%\"}),\n\n html.Div(children=[\n html.Div(id='policy div',children=\n [\n html.P('适龄儿童入学政策',style={\"text-align\":\"center\"}),\n dcc.Dropdown(id='policy',\n options=[{'label':op,'value':op} for op in avaliable_policy],\n value=avaliable_policy[0],\n style={\"margin\":\"auto\",\"width\":\"90\",\"padding\":\"10px\"}),\n ],style={\"width\":\"50%\",\"align-item\":\"center\"}),\n html.Div(id='research div',children=[\n html.P('教育公平观测维度',\n style={\"text-align\":\"center\"}),\n dcc.Dropdown(id='research object',\n options=[{'label':op,'value':op} for op in avaliable_research_object],\n value=avaliable_research_object[0],\n style={\"margin\":\"auto\",\"width\":\"90\",\"padding\":\"10px\"}),\n ],style={\"width\":\"50%\",\"align-item\":\"center\"}), \n ],style={\"display\":\"flex\",\"height\":\"4%\"}),\n html.Div(id='show div',children=[\n html.P('显示信息',\n style={\"text-align\":\"center\"}), \n dcc.Dropdown(id='show object',\n options=[{'label':op,'value':op} for op in avaliable_show_object],\n value=[avaliable_show_object[1],avaliable_show_object[2]],\n multi=True,\n style={\"width\":\"90%\",\"margin\":\"auto\",\"padding\":\"10px\"})\n ],style={\"width\":\"100%\"})\n ],style={\"width\":\"100%\"})\n ]),\n dcc.Tab(label='信息',value='info page',id='info page',children=[\n html.P('小区信息',\n style={\"text-align\":\"center\"}),\n dcc.Dropdown(id='village option',\n options=[{'label':op,'value':op} for op in avaliable_village_info],\n value=avaliable_village_info[0],\n style={\"width\":\"90%\",\"margin\":\"auto\",\"padding\":\"10px\"}),\n html.P('学校信息',\n style={\"text-align\":\"center\"}),\n dcc.Dropdown(id='school option',\n options=[{'label':op,'value':op} for op in avaliable_school_info],\n value=avaliable_school_info[0],\n style={\"width\":\"90%\",\"margin\":\"auto\",\"padding\":\"10px\"}),\n html.P('学区信息',\n style={\"text-align\":\"center\"}),\n dcc.Dropdown(id='district option',\n options=[{'label':op,'value':op} for op in avaliable_district_info],\n value=avaliable_district_info[0],\n style={\"width\":\"90%\",\"margin\":\"auto\",\"padding\":\"10px\"}),\n ]),\n dcc.Tab(label='建议',value='advice page',id='advice page',children=[]),\n ])\n ],className='pretty_container',style={\"height\":\"42%\"}),\n html.Div(id='Data option',children=[\n dcc.Dropdown(id='statistic option',\n options=[{'label':op,'value':op} for op in avaliable_statistics],\n value=avaliable_statistics[1],\n style={\"width\":\"90%\",\"margin\":\"auto\",\"align\":\"center\"}),\n ],className='pretty_container'),\n html.Div(id='Data show',className=\"pretty_container\",style={\"height\":\"42%\"})\n ],className='four columns',style={'height':'930px'})\n ],className='raw flex-display',style={\"color\":\"#999\"}),\n html.Div(id='statistic chart',children=[\n html.Div(id='district select',\n children=[\n html.P('学区',\n style={\"text-align\":\"center\"}), \n dcc.Dropdown(id='district object',\n options=[{'label':op,'value':op} for op in district_object],\n value=[district_object[1],district_object[2]],\n multi=True,\n style={\"width\":\"90%\",\"margin\":\"auto\",\"padding\":\"10px\"})\n ],\n className=\"pretty_container four columns\"),\n html.Div(id='school select',\n children=[\n html.P('学校',\n style={\"text-align\":\"center\"}), \n dcc.Dropdown(id='school object',\n options=[{'label':op,'value':op} for op in school_object],\n value=[school_object[1],school_object[2]],\n multi=True,\n style={\"width\":\"90%\",\"margin\":\"auto\",\"padding\":\"10px\"}) \n ],\n className=\"pretty_container four 
columns\"),\n html.Div(id='fair index select',\n children=[\n html.P('公平性指标',\n style={\"text-align\":\"center\"}), \n dcc.Dropdown(id='fairindex object',\n options=[{'label':op,'value':op} for op in fairindex_object],\n value=fairindex_object[1],\n multi=False,\n style={\"width\":\"90%\",\"margin\":\"auto\",\"padding\":\"10px\"}) \n ],\n className=\"pretty_container four columns\"),\n \n ],className='raw flex-display',style={\"color\":\"#999\"}),\n html.Div(id='fairindex district display chart',children=[\n html.Div(id='fairindex_district',\n children=[\n html.P('各学区公平性指标分布',\n style={\"text-align\":\"center\"}), \n html.Div(id='district fairindex show',className=\"pretty_container\",style={\"height\":\"300px\"})\n ],\n className=\"pretty_container twelve columns\"),],style={\"color\":\"#999\"}),\n html.Div(id='fairindex school display chart',children=[\n html.Div(id='fairindex_school',\n children=[\n html.P('各学校学区房对应公平性指标分布',\n style={\"text-align\":\"center\"}), \n html.Div(id='school fairindex show',className=\"pretty_container\",style={\"height\":\"300px\"})\n ],\n className=\"pretty_container twelve columns\")\n ],style={\"color\":\"#999\"}),#className='raw flex-display',style={\"color\":\"#999\"}),\n html.Div(id='gini display chart',children=[\n html.Div(id='gini display',\n children=[\n html.P('各学区公平性指标的基尼系数变化',\n style={\"text-align\":\"center\"}), \n html.Div(id='gini show',className=\"pretty_container\",style={\"height\":\"300px\"})\n ],className=\"pretty_container twelve columns\") ,\n \n #html.Div(id='school display',\n # children=[\n # html.P('学校属性变化',\n # style={\"text-align\":\"center\"}), \n # html.Div(id='school show',className=\"pretty_container\",style={\"height\":\"42%\"})\n #],className=\"pretty_container four columns\") , \n #html.Div(id='village display',\n # children=[\n # html.P('学校属性变化',\n # style={\"text-align\":\"center\"}), \n # html.Div(id='village show',className=\"pretty_container\",style={\"height\":\"42%\"})\n #],className=\"pretty_container four columns\") , \n \n \n ],style={\"color\":\"#999\"}),#className='raw flex-display',style={\"color\":\"#999\"}),\n ])\n\nfrom dash.dependencies import Input,Output\n\n@app.callback(Output('Data show','children'),[Input('statistic option','value')])\ndef show_statics(value):\n if value in ['家庭户均人口数','人均可支配收入','人均消费支出','教育支出','教育文化娱乐总支出','每一就业负担人数','恩格尔系数','小学招生数']:\n statisticsdat=go.Scatter(\n x=statistics['年份'],\n y=statistics[value], \n mode='markers+lines',\n opacity=0.7,\n marker={'size':10}, \n )\n #fig=go.Figure(statisticsdat)\n #fig.show()\n statisticlayout=go.Layout(margin={\"r\":60,\"t\":40,\"l\":60,\"b\":40},\n xaxis={\"title\":\"年份\"},\n yaxis={\"title\":value},\n showlegend=False)\n return dcc.Graph(figure={\"data\":[statisticsdat],\"layout\":statisticlayout},\n style={\"height\":\"100%\",\"width\":\"100%\"})\n#@app.callback(Output('Transportation Time Cost','children'),[Input('year','value'),Input('districts','value')])\n#def show_transportation_statistics(year,districtindex):\n\n@app.callback(Output('Map','children'),[Input('year','value'),\\\n Input('districts','value'),\\\n Input('show object','value'),\\\n Input('village option','value'),\\\n Input('school option','value'),\\\n Input('district option','value')])\ndef show_maps(year,districtindex,objects_show,village_option,school_option,district_option):\n print (year)\n year=year[0]+2010\n districtname=districtdict[str(districtindex)]\n print (year,districtname,objects_show)\n if '学区信息' in objects_show:\n district_opacity=0.3\n else:\n 
district_opacity=0.7\n\n district_fig = px.choropleth_mapbox(df, geojson=districts_map, locations='area', color='index',featureidkey=\"properties.name\",\n color_continuous_scale=\"Viridis\",\n range_color=(0, 10),\n #color_discrete_map={'A':'b','B':'red','C':'green','D':'gray','E':'orange','F':'puple','G':'yellow','H':'g','I':'brown','J':'r','K':'orange'},\n opacity=district_opacity,)\n \n if '小区信息' in objects_show:\n if village_option=='位置':\n village_hovertext=[villages.name[i]+' '+villages.address[i] for i in range(len(villages))]\n village_marker={\n 'size':10,'opacity':0.7,\n #'symbol':[\"town\" for i in range(len(villages.price))]\n } \n elif village_option=='房价':\n village_hovertext=[villages.name[i]+' %.2f'%villages.price[i]+' %s'%villages['school'][i][0] for i in range(len(villages))]\n village_marker={\n 'size':(villages.price-np.min(villages.price))/(np.max(villages.price)-np.min(villages.price))*30+2,\n 'color':villages.price,\n 'opacity':0.7\n }\n \"\"\"\n district_fig.add_trace(go.Scattermapbox(mode='markers',\n lon = villages.wgs84lng,\n lat = villages.wgs84lat,\n hovertext = village_hovertext,\n hoverinfo = 'text',\n marker=village_marker,\n ))\n \"\"\"\n district_fig.add_trace(go.Densitymapbox(lat=villages.wgs84lat,lon=villages.wgs84lng,z=villages.price,radius=20))\n \n if '学校信息' in objects_show:\n if school_option=='位置':\n school_hovertext=[schools.school[i]+' '+schools.addressnum[i]+' '+schools.district[i]+' %.4f,%.4f'%(schools.gcj02lng[i],schools.gcj02lat[i]) for i in range(len(schools))]\n #school_marker={'size':30,'opacity':0.7,'color':[districtindexdict[schools.district[i].strip('学区')] for i in range(len(schools))],\n school_marker={'size':20,\n 'symbol':[\"bus\" for i in range(len(schools))]}\n elif school_option==\"管理水平\":\n school_hovertext=[schools.school[i]+' '+str(schools.envir_score[i]) for i in range(len(schools))]\n school_marker={'size':(schools.envir_score-np.min(schools.envir_score))/(np.max(schools.envir_score)-np.min(schools.envir_score))*40+10,\n 'color':schools.envir_score,\n }\n elif school_option==\"北京市学科带头人人数\":\n school_hovertext=[schools.school[i]+' '+str(schools.city_leadernum[i]) for i in range(len(schools))]\n school_marker={'size':(schools.city_leadernum-np.min(schools.city_leadernum))/(np.max(schools.city_leadernum)-np.min(schools.city_leadernum))*40+10,\n 'color':schools.city_leadernum,\n }\n elif school_option==\"北京市学科骨干教师人数\":\n school_hovertext=[schools.school[i]+' '+str(schools.city_teachernum[i]) for i in range(len(schools))]\n school_marker={'size':(schools.city_teachernum-np.min(schools.city_teachernum))/(np.max(schools.city_teachernum)-np.min(schools.city_teachernum))*40+10,\n 'color':schools.city_teachernum,\n }\n elif school_option==\"学校等级\":\n school_hovertext=[schools.school[i]+' '+schools.level[i] for i in range(len(schools))]\n datarray=np.array([school_leveldict[schools.level[i]] for i in range(len(schools))])\n school_marker={'size':(datarray-np.min(datarray))/(np.max(datarray)-np.min(datarray))*40+10,\n 'color':datarray,\n }\n elif school_option==\"家长评分\":\n school_hovertext=[schools.school[i]+' '+str(schools.score[i]) for i in range(len(schools))]\n school_marker={'size':(schools.score-np.min(schools.score))/(np.max(schools.score)-np.min(schools.score))*40+10,\n 'color':schools.score,\n }\n elif school_option==\"综合评分\":\n datarray=np.array([school_leveldict[schools.level[i]] for i in range(len(schools))])\n 
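# Every branch of show_maps() in this record rescales a column with the same
# expression, (x - min(x)) / (max(x) - min(x)) * 40 + 10, to map values onto
# marker sizes in [10, 50]. A small helper capturing that pattern is sketched
# here; the function name and the constant-column guard are assumptions, not
# code from the source file.
import numpy as np

def minmax_marker_size(values, lo=10.0, hi=50.0):
    """Linearly rescale `values` into [lo, hi] for use as map marker sizes."""
    v = np.asarray(values, dtype=float)
    span = v.max() - v.min()
    if span == 0:  # constant column: avoid division by zero
        return np.full_like(v, (lo + hi) / 2.0)
    return (v - v.min()) / span * (hi - lo) + lo

# e.g. school_marker = {'size': minmax_marker_size(schools.score), 'color': schools.score}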
avgscore=(schools.envir_score-np.min(schools.envir_score))/(np.max(schools.envir_score)-np.min(schools.envir_score))+\\\n (schools.city_leadernum-np.min(schools.city_leadernum))/(np.max(schools.city_leadernum)-np.min(schools.city_leadernum))+\\\n (schools.city_teachernum-np.min(schools.city_teachernum))/(np.max(schools.city_teachernum)-np.min(schools.city_teachernum))+\\\n (datarray-np.min(datarray))/(np.max(datarray)-np.min(datarray))+\\\n (schools.score-np.min(schools.score))/(np.max(schools.score)-np.min(schools.score))\n avgscore=avgscore/5.0*100\n school_marker={'size':(avgscore-np.min(avgscore))/(np.max(avgscore)-np.min(avgscore))*40+10,\n 'color':avgscore,\n }\n school_hovertext=[schools.school[i]+' '+str(avgscore[i]) for i in range(len(schools))]\n district_fig.add_trace(go.Scattermapbox(mode='markers',\n lon = schools.wgs84lng,\n lat = schools.wgs84lat,\n text = school_hovertext,\n #hoverinfo = 'text',\n marker = school_marker,))\n \n if districtindex<11:\n zoomlevel=14\n center={\"lat\":df['lat'][districtindex],\"lon\":df['lng'][districtindex]}\n else:\n zoomlevel=12\n center={\"lat\":df['lat'][4] , \"lon\": df['lng'][4]}\n district_fig.update_layout(\n margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0},\n mapbox={\"accesstoken\":token,\n 'center':center,\n 'zoom':zoomlevel,\n #'style':\"open-street-map\"},\n #'style':\"outdoors\"\n \"style\":\"light\"},\n showlegend=False,\n height=800,)\n return dcc.Graph(figure=district_fig)\n\n@app.callback(Output('district fairindex show','children'),[Input('year','value'),Input('district object','value'),Input('fairindex object','value')])\ndef plot_fair_index_district(years,districts,key): \n figdict={}\n #for key in ['current price','educost','timecost','eduresource','educost_efficiency','timecost_efficiency','edu_resource difference district','edu_resource difference xicheng']:\n figdict[key]=go.Figure()\n nowresult =[now_villages[key][i] for i in range(len(now_villages[key])) if now_villages['学区'][i] in districts]\n nowdistrict=[now_villages['学区'][i] for i in range(len(now_villages[key])) if now_villages['学区'][i] in districts]\n figdict[key].add_trace(go.Box(y=nowresult,x=nowdistrict,name='2020'))\n for i in range(years[0],years[1]+1,1):\n newresult=[vlist[i][key][j] for j in range(len(vlist[i][key])) if vlist[i]['学区'][j] in districts]\n newdistrict=[vlist[i]['学区'][j] for j in range(len(vlist[i][key])) if vlist[i]['学区'][j] in districts]\n figdict[key].add_trace(go.Box(y=newresult,x=newdistrict,name=str(2020+i)))\n figdict[key].update_layout(\n yaxis_title=key,\n boxmode='group', # group together boxes of the different traces for each value of x\n margin={\"r\":60,\"t\":40,\"l\":60,\"b\":40},\n )\n return [dcc.Graph(figure=figdict[key],\n style={\"height\":\"100%\",\"width\":\"100%\"})]\n\n@app.callback(Output('school fairindex show','children'),[Input('year','value'),Input('school object','value'),Input('fairindex object','value')])\ndef plot_fair_index_school(years,schools,key): \n figdict={}\n #for key in ['current price','educost','timecost','eduresource','educost_efficiency','timecost_efficiency','edu_resource difference district','edu_resource difference xicheng']:\n figdict[key]=go.Figure()\n nowresult =[now_villages[key][i] for i in range(len(now_villages[key])) if now_villages['学校'][i] in schools]\n nowdistrict=[now_villages['学校'][i] for i in range(len(now_villages[key])) if now_villages['学校'][i] in schools]\n figdict[key].add_trace(go.Box(y=nowresult,x=nowdistrict,name='2020'))\n for i in range(years[0],years[1]+1,1):\n 
newresult=[vlist[i][key][j] for j in range(len(vlist[i][key])) if vlist[i]['学校'][j] in schools]\n newdistrict=[vlist[i]['学校'][j] for j in range(len(vlist[i][key])) if vlist[i]['学校'][j] in schools]\n figdict[key].add_trace(go.Box(y=newresult,x=newdistrict,name=str(2020+i)))\n figdict[key].update_layout(\n yaxis_title=key,\n boxmode='group', # group together boxes of the different traces for each value of x\n margin={\"r\":60,\"t\":40,\"l\":60,\"b\":40},\n )\n return [dcc.Graph(figure=figdict[key],\n style={\"height\":\"100%\",\"width\":\"100%\"})]\n\n@app.callback(Output('gini show','children'),[Input('year','value'),Input('district object','value'),Input('fairindex object','value')]) \ndef plot_gini_district(years,districts,key):\n figdict={}\n #for key in ['educost','timecost','eduresource','educost_efficiency','timecost_efficiency','edu_resource difference district','edu_resource difference xicheng']:\n figdict[key]=go.Figure()\n for district in districts:\n result=[]\n for i in range(years[0],years[1]+1,1):\n result.append(ginilist[i][district][key])\n figdict[key].add_trace(go.Scatter(x=list(range(2020+years[0],2020+years[1]+1,1)),y=result,name=district))\n figdict[key].update_layout(\n yaxis_title=key,\n xaxis_title='year',\n margin={\"r\":60,\"t\":40,\"l\":60,\"b\":40}, \n )\n return [dcc.Graph(figure=figdict[key],\n style={\"height\":\"100%\",\"width\":\"100%\"})]\n\nif __name__==\"__main__\":\n app.run_server(debug=True, use_reloader=True)\n","sub_path":"finnal_Xicheng_map_visual_system/Mapapp2.py","file_name":"Mapapp2.py","file_ext":"py","file_size_in_byte":27281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"239804056","text":"from functools import reduce\r\nfrom operator import mul\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\n\r\nimport utils\r\nfrom layer_norm import LayerNorm1D\r\nfrom layer_norm_lstm import LayerNormLSTMCell\r\nfrom utils import preprocess_gradients\r\n\r\n\r\n# @NOTE: we will only going to maintain the fast_meta_optimizer but not the one above.\r\n\r\n# this meta-optimizer is called fast as it already has some prior knowledge about the updating rule on our planet.\r\n# it only would like to learn theta <- f*theta - i*grad two parameters\r\nclass FastMetaOptimizer(nn.Module):\r\n\r\n def __init__(self, model, num_layers, hidden_size, num_workers_sim, use_cuda = True):\r\n super(FastMetaOptimizer, self).__init__()\r\n self.meta_model = model\r\n\r\n \"\"\"\r\n WHY magic 6 here? 
This is because preprocess_gradients(flat_grads): [d, 2*num_workers]\r\n print(flat_params.size()): [d, 1]\r\n print(loss.size()): [d, 1]\r\n print(self.alpha.size()): [d, num_workers]\r\n\r\n \r\n \"\"\"\r\n self.num_workers_sim = num_workers_sim\r\n self.linear1 = nn.Linear(3 * self.num_workers_sim + 2, self.num_workers_sim)\r\n self.alpha = Variable(\r\n torch.tensor([1.0 / self.num_workers_sim for i in range(self.num_workers_sim)]), requires_grad = True)\r\n if use_cuda:\r\n self.alpha = self.alpha.cuda()\r\n def hook(module, grad_input, grad_output):\r\n print(\"MODULE: {}\".format(\"META Optimizer\"))\r\n print(\"HOOK OUTPUT: {}\".format(grad_output))\r\n \r\n # self.linear1.register_backward_hook(hook)\r\n \r\n \r\n def forward(self, x):\r\n # Gradients preprocessing\r\n x = self.linear1(x)\r\n return x\r\n\r\n # this is only a trunk, as there is even not an LSTM\r\n def reset_params(self, keep_states=False, model=None, use_cuda=False):\r\n self.meta_model.reset()\r\n self.meta_model.copy_params_from(model)\r\n self.alpha = Variable(self.alpha.data)\r\n # if keep_states:\r\n # self.alpha = Variable(self.alpha.data)\r\n # else:\r\n # self.alpha = Variable(\r\n # torch.tensor([1.0 / self.num_workers_sim for i in range(self.num_workers_sim)])) ## reset the weight\r\n if use_cuda:\r\n self.alpha = self.alpha.cuda()\r\n\r\n def meta_update(self, model_with_grads, grads_per_workers, loss, lr = 0.01):\r\n # First we need to create a flat version of parameters and gradients\r\n ## [ # _worker, d, 2]\r\n # for i in range(self.num_workers_sim):\r\n # grads_per_workers[i] = torch.cat(grads_per_workers[i]) # cat the grads\r\n\r\n flat_params = self.meta_model.get_flat_params().unsqueeze(-1)\r\n\r\n self.old_alpha = self.alpha.clone()\r\n # print(self.old_alpha.data.size())\r\n self.alpha = self.alpha.expand(flat_params.size(0), -1)\r\n\r\n loss = loss.expand_as(flat_params)\r\n\r\n # the model implemented here is in fact a basic RNN (with self.alpha as the hidden state)\r\n # the function preprocess gradients is used to make the input of the net balanced (not too large or too small)\r\n inputs = Variable(torch.cat((*map(preprocess_gradients, grads_per_workers), flat_params.data, loss), 1))\r\n # eta = 1.5\r\n inputs = torch.cat((inputs, self.alpha), 1)\r\n self.alpha = F.softmax(self(inputs).mean(\r\n dim=0)) # this means the gradients, the location and the loss has been fed into the linear model defined in forward\r\n # Meta update itself, using the rule of SGD\r\n cuda_0 = torch.device(\"cuda:0\")\r\n \r\n for i in range(self.num_workers_sim):\r\n flat_params = flat_params - lr * (self.alpha[i].to(cuda_0) * grads_per_workers[i].to(cuda_0)) / self.num_workers_sim\r\n # flat_params = flat_params.view(-1)\r\n self.meta_model.set_flat_params(flat_params)\r\n\r\n # Finally, copy values from the meta model to the normal one.\r\n self.meta_model.copy_params_to(model_with_grads)\r\n # print(self.alpha)\r\n return self.meta_model.model\r\n\r\n\r\n# implement the classical GAR, i.e. 
the mean vector, as a comparison\r\nclass ClassicMetaOptimizer(nn.Module):\r\n \"\"\"\r\n To implement it as a class may be better to maintain the clarity of the main\r\n \"\"\"\r\n def __init__(self, model, num_workers_sim, aggregation_method):\r\n super(ClassicMetaOptimizer, self).__init__()\r\n self.meta_model = model\r\n self.num_workers_sim = num_workers_sim\r\n self.aggregation_method = aggregation_method\r\n\r\n def reset_params(self, keep_states=False, model=None, use_cuda=False):\r\n self.meta_model.reset()\r\n self.meta_model.copy_params_from(model)\r\n return\r\n\r\n def meta_update(self, model_with_grads, grads_per_workers, loss):\r\n flat_params = self.meta_model.get_flat_params().unsqueeze(-1)\r\n flat_params = flat_params - 0.01 * self.aggregation_method(grads_per_workers)\r\n flat_params = flat_params.view(-1)\r\n self.meta_model.set_flat_params(flat_params)\r\n self.meta_model.copy_params_to(model_with_grads)\r\n return self.meta_model.model\r\n\r\n\r\n# A helper class that keeps track of meta updates\r\n# It's done by replacing parameters with variables and applying updates to\r\n# them.\r\nclass MetaModel:\r\n\r\n def __init__(self, model):\r\n self.model = model\r\n def hook(module, grad_input, grad_output):\r\n print(\"MODULE: {}\".format(\"UNDERLYING MODEL\"))\r\n print(\"HOOK OUTPUT: {}\".format(grad_output))\r\n # self.model.register_backward_hook(hook)\r\n \r\n\r\n def reset(self):\r\n _queue = [self.model]\r\n while(len(_queue) > 0):\r\n cur = _queue[0]\r\n _queue = _queue[1:] # dequeue\r\n if(\"weight\" in cur._parameters):\r\n cur._parameters['weight'] = Variable(cur._parameters['weight'].data)\r\n if(\"bias\" in cur._parameters and not (cur._parameters[\"bias\"] is None)):\r\n cur._parameters['bias'] = Variable(cur._parameters['bias'].data)\r\n for module in cur.children():\r\n _queue.append(module)\r\n \r\n def get_flat_params(self):\r\n params = []\r\n _queue = [self.model]\r\n while(len(_queue) > 0):\r\n cur = _queue[0]\r\n _queue = _queue[1:] # dequeue\r\n if(\"weight\" in cur._parameters):\r\n params.append(cur._parameters['weight'].view(-1))\r\n if(\"bias\" in cur._parameters and not (cur._parameters[\"bias\"] is None)):\r\n params.append(cur._parameters['bias'].view(-1))\r\n for module in cur.children():\r\n _queue.append(module)\r\n return torch.cat(params)\r\n\r\n def set_flat_params(self, flat_params):\r\n # Restore original shapes (which is actually required during the training phase)\r\n offset = 0\r\n _queue = [self.model]\r\n while(len(_queue) > 0):\r\n cur = _queue[0]\r\n _queue = _queue[1:] # dequeue\r\n weight_flat_size = 0\r\n bias_flat_size = 0\r\n if(\"weight\" in cur._parameters):\r\n weight_shape = cur._parameters['weight'].size()\r\n weight_flat_size = reduce(mul, weight_shape, 1)\r\n cur._parameters['weight'] = flat_params[offset:offset + weight_flat_size].view(*weight_shape)\r\n if(\"bias\" in cur._parameters and not (cur._parameters[\"bias\"] is None)):\r\n bias_shape = cur._parameters['bias'].size()\r\n bias_flat_size = reduce(mul, bias_shape, 1)\r\n cur._parameters['bias'] = flat_params[offset + weight_flat_size:offset + weight_flat_size + bias_flat_size].view(*bias_shape)\r\n offset += weight_flat_size + bias_flat_size\r\n for module in cur.children():\r\n _queue.append(module)\r\n\r\n def copy_params_from(self, model):\r\n for modelA, modelB in zip(self.model.parameters(), model.parameters()):\r\n modelA.data.copy_(modelB.data)\r\n\r\n def copy_params_to(self, model):\r\n for modelA, modelB in zip(self.model.parameters(), 
model.parameters()):\r\n modelB.data.copy_(modelA.data)\r\n","sub_path":"meta_optimizer.py","file_name":"meta_optimizer.py","file_ext":"py","file_size_in_byte":8281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"519545342","text":"import sys\nimport os\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import optimizers\nfrom keras.models import Model, Sequential\nfrom keras.layers import Dropout, Flatten, Dense, Input, ELU, BatchNormalization, Activation\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.callbacks import CSVLogger\nimport tensorflow as tf\nimport numpy\nfrom keras import backend as K\nimport math\nfrom keras import initializers\n# Các version tương ứng với việc điều chỉnh tham số của mô hình classifier\n#IX : Batch, ReLu, Dropout = 0.25\n#XI: Batch, ELu, Dropout = 0.25\n#XII: BED(0.1)\n#XIIV: BED(0.5)\n#XIV: BED(0.5),modify adam\n#XV: ED(0.5),modify adam\n#XVI: ED(0.5),modify adam. correcting Batch?\n#XVII: BED(0.5), Batch to Flatten\n#XVIII: double dense stack\n#XIV: double dense, remove Batch\n#XX: remove batch, dense = 500 => adjust batchsize to 16\n#XXIII: HO\n#current: XXIV\nchannel_axis = 1 if K.image_dim_ordering() == \"th\" else -1\nchannel_axis = 1\ncpuset = 0 #1 or other\nif cpuset == 1:\n cpulist = [\"gpu(2)\", \"gpu(3)\"]\nelse:\n cpulist = [\"gpu(0)\", \"gpu(1)\"]\n\n\nimg_width, img_height = 250, 250\nepochs = 200\nbatch_size = 16\nimg_channels = 3\nclasses_num = 9\n\ntrain_data_path = './training_seg_5'\nvalidation_data_path = './int_validation_seg_5'\next_data_path = './ext_validation_seg_6'\n\noriginal_dir = './training_seg_6'\nint_val_dir = './int_val_seg_6'\nint_test_dir = './int_test_seg_6'\n\n#custom filter\ndef filter_layer(x):\n red_x = x[:,:,:,0]\n blue_x = x[:,:,:,2]\n green_x = x[:,:,:,1]\n red_x = tf.expand_dims(red_x, axis=3)\n blue_x = tf.expand_dims(blue_x, axis=3)\n green_x = tf.expand_dims(green_x, axis=3)\n #output = tf.concat([red_x, blue_x], axis=3)\n output = green_x\n return output\n\n#model\ninput = Input(shape=(img_channels, img_height, img_width))\n#x = Lambda(filter_layer)(input)\nx = Conv2D(64, (5, 5), padding='same')(input)\nx = ELU(alpha=1.0)(x)\nx = BatchNormalization(axis=channel_axis)(x)\n\nx = Conv2D(64, (2, 2), padding=\"same\")(x)\nx = ELU(alpha=1.0)(x)\nx = BatchNormalization(axis=channel_axis)(x)\nx = MaxPooling2D(pool_size=(3, 3))(x)\n\nx = Conv2D(256, (2, 2), padding=\"same\")(x)\nx = ELU(alpha=1.0)(x)\nx = BatchNormalization(axis=channel_axis)(x)\nx = MaxPooling2D(pool_size=(2, 2))(x)\n\nx = Conv2D(256, (5, 5), padding=\"same\")(x)\nx = ELU(alpha=1.0)(x)\nx = BatchNormalization(axis=channel_axis)(x)\nx = MaxPooling2D(pool_size=(3, 3))(x)\n\nx = Conv2D(128, (4, 4), padding=\"same\")(x)\nx = ELU(alpha=1.0)(x)\nx = BatchNormalization(axis=channel_axis)(x)\nx = MaxPooling2D(pool_size=(4, 4))(x)\n\nx = Flatten()(x)\nx = Dense(500)(x)\nx = ELU(alpha=1.0)(x)\nx = BatchNormalization(axis=channel_axis)(x)\n#x = Dense(500)(x)\n#x = ELU(alpha=1.0)(x)\n#x = BatchNormalization(axis=channel_axis)(x)\n\nx = Dropout(0.5)(x)\n\noutput = Dense(classes_num, activation='softmax')(x)\nmodel = Model(inputs=input, outputs=output)\nmodel.summary()\n\n#model = multi_gpu_model(model, 4)\n\nadam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.00005)\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'],\n context=cpulist)\n\n# generator\ntrain_datagen = 
ImageDataGenerator(rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n rotation_range=45,\n height_shift_range=0.2,\n width_shift_range=0.2,\n vertical_flip=True,\n horizontal_flip=True)\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\n# #without val\n# train_generator = train_datagen.flow_from_directory(\n# train_data_path,\n# target_size=(img_height, img_width),\n# batch_size=batch_size,\n# class_mode='categorical')\n#\n# validation_generator = test_datagen.flow_from_directory(\n# validation_data_path,\n# target_size=(img_height, img_width),\n# batch_size=batch_size,\n# class_mode='categorical')\n#\n# ext_validation_generator = test_datagen.flow_from_directory(\n# ext_data_path,\n# target_size=(img_height, img_width),\n# batch_size=1,\n# shuffle=False,\n# class_mode=None)\n#\n# nb_train_samples = train_generator.samples\n# nb_validation_samples = validation_generator.samples\n\n\n# withval\ntrain_generator = train_datagen.flow_from_directory(\n original_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='categorical')\n\nint_val_generator = test_datagen.flow_from_directory(\n int_val_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='categorical')\n\nint_test_generator = test_datagen.flow_from_directory(\n int_test_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='categorical')\n\nnb_train_samples = train_generator.samples\nnb_validation_samples = int_val_generator.samples\n\next_validation_generator = test_datagen.flow_from_directory(\n ext_data_path,\n target_size=(img_height, img_width),\n batch_size=1,\n shuffle=False,\n class_mode=None)\n\n\n\"\"\"\n#withval\ntrain_generator = train_datagen.flow_from_directory(\n original_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='categorical')\n\nint_val_generator = test_datagen.flow_from_directory(\n int_val_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='categorical')\n\nint_test_generator = test_datagen.flow_from_directory(\n int_test_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='categorical')\n\nnb_train_samples = train_generator.samples\nnb_validation_samples = int_val_generator.samples\n\"\"\"\n\n#load weight\n#model.load_weights('./weight/XVII-4-cont-200-reseg6_weights.85-0.37.hdf5')\n\n#callback\nfrom keras import callbacks\nfrom keras.callbacks import TensorBoard\nimport numpy as np\n\nclass TensorBoardWrapper(TensorBoard):\n '''Sets the self.validation_data property for use with TensorBoard callback.'''\n\n def __init__(self, batch_gen, nb_steps, **kwargs):\n super().__init__(**kwargs)\n self.batch_gen = batch_gen # The generator.\n self.nb_steps = nb_steps # Number of times to call next() on the generator.\n\n def on_epoch_end(self, epoch, logs):\n # Fill in the `validation_data` property. 
Obviously this is specific to how your generator works.\n # Below is an example that yields images and classification tags.\n # After it's filled in, the regular on_epoch_end method has access to the validation_data.\n imgs, tags = None, None\n for s in range(self.nb_steps):\n ib, tb = next(self.batch_gen)\n if imgs is None and tags is None:\n imgs = np.zeros((self.nb_steps * ib.shape[0], *ib.shape[1:]), dtype=np.float32)\n tags = np.zeros((self.nb_steps * tb.shape[0], *tb.shape[1:]), dtype=np.uint8)\n imgs[s * ib.shape[0]:(s + 1) * ib.shape[0]] = ib\n tags[s * tb.shape[0]:(s + 1) * tb.shape[0]] = tb\n self.validation_data = [imgs, tags, np.ones(imgs.shape[0]), 0.0]\n return super().on_epoch_end(epoch, logs)\n\n\n#callbacks = [TensorBoardWrapper(validation_generator, nb_steps=nb_validation_samples // batch_size, log_dir='./tf-log', histogram_freq=1,\n # batch_size=int(batch_size), write_graph=False, write_grads=True)]\n\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\ncallbacks = [\n #EarlyStopping(monitor='val_loss', patience=10, verbose=0),\n ModelCheckpoint('./weight/XXIV-3-200-reseg6_weights.{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', save_best_only=True, verbose=2)\n ,CSVLogger('./log/XXIV-3-200-reseg6-log.csv',append=False, separator=',')\n # ,TensorBoardWrapper(int_val_generator, nb_steps=math.ceil(nb_validation_samples / batch_size), log_dir='./tf-log-XXIV-rg6/',\n # histogram_freq=5,\n # batch_size=int(batch_size), write_graph=True, write_grads=False, write_images=False)\n ]\n\n#train\nhistory = model.fit_generator(train_generator,\n steps_per_epoch=nb_train_samples // batch_size,\n epochs=epochs,\n validation_data=int_val_generator,\n validation_steps=math.ceil(nb_validation_samples / batch_size),verbose=2,\n callbacks=callbacks,\n workers=32\n )\nhist = history.history\nmodel.save_weights('./weight/XXIII-3-50-reseg6_weights.hdf5')\nmodel.evaluate_generator(int_test_generator)\nloss = np.array(hist[\"loss\"])\ny_predict = model.predict_generator(ext_validation_generator, verbose = 2)\n#export result\nnumpy.savetxt(\"./result/XVII-predict-4-0.37-reseg6.csv\", y_predict, delimiter=\",\")\nnumpy.savetxt(\"./log/XXIII-1-200-reseg6-log-loss-2.csv\", loss, delimiter=\",\")\n\n\n\"\"\"\nfilelist = ext_validation_generator.filenames\nfilelist = array(filelist)\nnumpy.savetxt(\"./result/vallist-IX-reseg5.csv\", filelist, delimiter=\"/n\", fmt='%s')\n\"\"\"\n","sub_path":"Cellclassifier-XXIV.py","file_name":"Cellclassifier-XXIV.py","file_ext":"py","file_size_in_byte":9178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"344051470","text":"import re\n\n# files to parse will come in small text chunks.\n\ndef process_chunk(chunk_file): # chunk file should be /chapter/number.txt\n\t'''\n\tFunction receives text in line chunks (file), where every line has a USFM\n\tmarker, or an exception is thrown. 
Function returns a document for\n\tinsertion into the database.\n\t'''\n\tf = open(chunk_file, 'r')\n\ttry:\n\t\tm = re.search('v\\s(\\d+)', f.read())\n\t\tinsert_verse = m.group(1)\n\texcept AttributeError:\n\t\tinsert_verse = None \n\tf.close()\n\n\tdoc = {\n\t\t\"_id\": None, \n\t\t\"p_markers\": [],\n\t\t\"s_markers\": [],\n\t\t\"verses\" : {}\n\t}\n\twith open(chunk_file, 'r') as f:\n\t\tparams = chunk_file.split('/')\n\t\tchapter_string = int(params[-2])\n\t\tdoc['_id'] = chapter_string\n\n\t\tfor line in f:\n\t\t\tif line[0:2] == '\\c':\n\t\t\t\tdelim = line.split(' ')\n\t\t\t\tif int(delim[1]) != doc['_id']:\n\t\t\t\t\traise ValueError(\"USFM chapter marking does not match file info.\")\n\t\t\telif line[0:2] == \"\\n\":\n\t\t\t\tpass\n\t\t\telif line[1:2] == 'v':\n\t\t\t\tdelim = line.split(' ')\n\t\t\t\tverse = delim[1]\n\t\t\t\tinsert_verse = verse\n\t\t\t\tcontent = \" \".join(delim[2:])\n\t\t\t\tdoc[\"verses\"][verse] = content\n\t\t\telif line[0:2] == \"\\p\":\n\t\t\t\tdoc[\"p_markers\"].append(insert_verse)\n\t\t\telif line[0:2] == \"\\s\":\n\t\t\t\tdoc[\"s_markers\"].append(insert_verse)\n\t\t\telse:\n\t\t\t\tprint (\"Offending line: \" + line, \"File: \" + chunk_file)\n\t\t\t\traise ValueError(\"Invalid USFM found.\") \n\t\t\t\t\n\treturn doc\n","sub_path":"usfm/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"373807107","text":"\"\"\"\nRepresentative Looping Frequency Bootstrap and 95% Confidence Interval\n--------------------------------------------------------------------------------\nAuthor: Soichi Hirokawa\nLast Modified: January 7, 2020\nLicense: MIT\n\nDescription\n--------------------------------------------------------------------------------\nThis script generates the subfigure in the manuscript which shows a \nrepresentative bootstrap replicate distribution and the 95% confidence interval.\n\nNotes\n--------------------------------------------------------------------------------\nThis script is designed to be executed from the `code/figures` directory and uses \na relative path to load the necessary CSV files.\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport vdj.io\nimport vdj.viz\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nvdj.viz.plotting_style()\n\n\n# Upload V4-57-1 sequence looping dataset\ndata = pd.read_csv('../../data/compiled_looping_events.csv', comment='#')\ndata = data[(data['mutant']=='WT12rss') & (data['hmgb1']==80) & \n (data['salt']=='Mg')]\n\npercentiles = [2.5, 97.5]\ncol_names = [\"bs_95_low\", \"bs_95_high\"]\nbs_reps = int(1E6)\nbs_df = pd.DataFrame([])\nsampling = np.random.choice(data['n_loops'].values,size=(len(data), bs_reps),\n replace=True)\nloop_freq = np.sum(sampling, axis=0) / len(data)\ndf_dict = {'mutant':'V4-57-1', 'salt':'Mg', 'hmgb1':80,\n 'n_loops':data['n_loops'].sum(), 'n_beads':len(data),\n 'loops_per_bead':data['n_loops'].sum() / len(data)}\ncomputed_percentiles = np.percentile(loop_freq, percentiles)\nfor i,col in zip(computed_percentiles,col_names):\n df_dict[col] = i\n\nbs_df = bs_df.append(df_dict, ignore_index=True)\n\n# Form ECDFs\nx = list(np.sort(loop_freq))\ny = list(np.arange(0, bs_reps, 1) / bs_reps)\ny_short = [-1, 2]\ntext_perc = '95%'\n\ntrue_loops_val = y[x.index(bs_df['loops_per_bead'].values[0])]\n#%%\nfig, ax = plt.subplots(1, 1, figsize=(2,4))\nax.set_xlim([-1, 250000])\nax.hist(loop_freq, color='tomato', bins=20, zorder=10,\n 
orientation='horizontal')\nax.axhline(bs_df['loops_per_bead'].values[0], 0, true_loops_val, \n color='slategrey', ls='--', alpha=0.4, lw=2)\n\nax.scatter(true_loops_val, bs_df['loops_per_bead'], color='slategrey',\n s=50, alpha=0.7)\nax.vlines(true_loops_val, bs_df[col_names[0]].values[0],\n bs_df[col_names[1]].values[0], alpha=0.7, \n ls='-', color='slategrey', lw=3)\n\n_ = ax.set_ylim([loop_freq.min(),loop_freq.max()])\n_ = ax.set_ylabel('bootstrapped\\nlooping frequency', fontsize=16)\n_ = ax.set_xlabel('counts', fontsize=16)\n_ = ax.set_ylim([0, 0.6])\nytick = np.arange(0.0,0.7,0.1)\n_ = ax.set_yticks(ytick)\n_ = ax.set_yticklabels(['%.1f' %n for n in ytick])\n_ = ax.set_xticklabels([])\n\nfig.savefig('../../figures/SubFigXB_reference_bootstrap.pdf',\n bbox_inches='tight', facecolor='white')\n# %%\n","sub_path":"code/figures/SubFig_reference_bootstrap.py","file_name":"SubFig_reference_bootstrap.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"172839281","text":"from django.urls import path\nfrom . import views\n\napp_name = 'serviceApp'\n\nurlpatterns = [\n path('download/', views.download, name='download'), # 资料下载\n path('getDoc//', views.getDoc, name='getDoc'), # 单项资料下载\n path('platform/', views.platform, name='platform'), # 人脸识别开放平台\n]","sub_path":"JiangGuoProject/serviceApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"166990479","text":"import plotly.offline as py\nimport plotly.graph_objs as go\nimport plotly.figure_factory as ff\n\nimport pandas as pd\nimport numpy as np\n\n\ndef plotlinechart(data_list, countries, plot_name):\n data_list.index = data_list.index.strftime(\"%Y-%m-%d\")\n\n fig = go.Figure()\n\n if not countries:\n countries = data_list['country'].unique()\n\n for country in countries:\n df = data_list['count'].loc[data_list['country'] == country]\n x = df.index\n values = df.values\n fig.add_trace(go.Scatter(\n x=x,\n y=values,\n name=country, # Style name/legend entry with html tags\n connectgaps=True # override default to connect the gaps\n ))\n fig.update_layout(yaxis_type=\"log\")\n chart = py.plot(\n fig,\n show_link=False,\n output_type='div',\n include_plotlyjs=False,\n auto_open=False,\n )\n\n return chart\n\n\ndef curva_evolucao_confirmados(base,lista_paises):\n confirmed_cases_t2 = base.reset_index()\n confirmed_cases_t2.columns = ['variable','Country/Region','value']\n first_date = confirmed_cases_t2.loc[confirmed_cases_t2['value']>=50].sort_values(by=['Country/Region','variable']).groupby('Country/Region').head(1)\n first_date = first_date[['Country/Region', 'variable']]\n first_date.columns = ['Country/Region', 'first_date']\n confirmed_cases_t3 = confirmed_cases_t2.merge(first_date,on='Country/Region', how='left')\n confirmed_cases_t4 = confirmed_cases_t3.loc[~confirmed_cases_t3['first_date'].isna()]\n confirmed_cases_t4['var_dates'] = (confirmed_cases_t4['variable'] - confirmed_cases_t4['first_date']).dt.days\n confirmed_cases_t5 = confirmed_cases_t4.loc[confirmed_cases_t4['var_dates']>=0].sort_values(by=['Country/Region','var_dates']).groupby(['Country/Region','var_dates']).agg({'value':sum}).reset_index()\n paises_show = confirmed_cases_t5.groupby('Country/Region')['var_dates'].count().reset_index()\n confirmed_cases_t5 = 
confirmed_cases_t5.loc[confirmed_cases_t5['Country/Region'].isin(list(paises_show.loc[paises_show['var_dates']>=5,'Country/Region']))]\n confirmed_cases_t6 = confirmed_cases_t5.loc[confirmed_cases_t5['Country/Region'].isin(lista_paises)]\n curva_evolucao_confirmados = confirmed_cases_t6\n return curva_evolucao_confirmados\n\ndef plot_curva_evolucao_confirmados(dados_graf,dados_paises,plot_china=0):\n fig = go.Figure()\n \n dados_graf = dados_graf.merge(dados_paises[['Country/Region','Order']].drop_duplicates(),on='Country/Region').sort_values(by=['Order','var_dates'])\n \n dados_graf = dados_graf.loc[dados_graf['var_dates']<=dados_graf.loc[dados_graf['Country/Region']!='China','var_dates'].max()]\n \n if plot_china==0:\n dados_graf = dados_graf.loc[~dados_graf['Country/Region'].isin(['China'])]\n\n for i in list(dados_graf['Country/Region'].unique()):\n fig.add_trace(go.Scatter(x=list(dados_graf.loc[dados_graf['Country/Region']==i,'var_dates']), \n y=list(dados_graf.loc[dados_graf['Country/Region']==i,'value']), name=i,mode='lines',line_shape='spline',\n line=dict(color=dados_paises.loc[dados_paises['Country/Region']==i,'Color'].values[0],width=dados_paises.loc[dados_paises['Country/Region']==i,'Width'].values[0])))\n\n fig.update_layout(yaxis_type=\"log\",paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)',title='Evolução de casos confirmados para países selecionados')\n\n fig.update_xaxes(title_text='# Dias desde a confirmação de 50 casos', showline=True, linewidth=1, linecolor='rgb(128,128,128)',showgrid=True, gridwidth=0.5, gridcolor='rgb(240,240,240)')\n fig.update_yaxes(title_text='# casos (k)', showline=True, linewidth=1, linecolor='rgb(128,128,128)',showgrid=True, gridwidth=0.5, gridcolor='rgb(240,240,240)')\n\n chart = py.plot(\n fig,\n show_link=False,\n output_type='div',\n include_plotlyjs=False,\n auto_open=False,\n )\n \n return chart\n\ndef progressao_confirmados(base,lista_paises):\n \n base_analise = curva_evolucao_confirmados(base,lista_paises)\n\n base_agrega = pd.DataFrame()\n\n for j in list(base_analise['Country/Region'].unique()):\n base_pais = base_analise.loc[base_analise['Country/Region']==j].sort_values(by='var_dates')\n for i in list(base_pais['var_dates']):\n try:\n qtde_dias = int(base_pais.loc[base_pais['value']>=int(base_pais.loc[base_pais['var_dates']==i,'value'])*2].head(1)['var_dates'])-i\n except:\n qtde_dias = np.nan\n #print(i,j,qtde_dias)\n base_valores = pd.DataFrame(data={'pais':j,'data':i,'qtde_dias':qtde_dias},index={0})\n base_agrega = pd.concat([base_agrega,base_valores])\n\n base_agrega = base_agrega.reset_index(drop=True)\n\n base_agrega = base_agrega.loc[~base_agrega['qtde_dias'].isna()]\n\n base_agrega.rename(columns={'pais':'Country/Region','data':'var_dates','qtde_dias':'value'},inplace=True)\n\n base_agrega = base_agrega.loc[base_agrega['Country/Region'].isin(lista_paises)]\n\n return base_agrega\n\ndef plot_progressao_confirmados(dados_graf,dados_paises,plot_china=0):\n fig = go.Figure()\n \n dados_graf = dados_graf.merge(dados_paises[['Country/Region','Order']].drop_duplicates(),on='Country/Region').sort_values(by=['Order','var_dates'])\n \n if plot_china==0:\n dados_graf = dados_graf.loc[~dados_graf['Country/Region'].isin(['China'])]\n\n for i in list(dados_graf['Country/Region'].unique()):\n fig.add_trace(go.Scatter(x=list(dados_graf.loc[dados_graf['Country/Region']==i,'var_dates']), \n y=list(dados_graf.loc[dados_graf['Country/Region']==i,'value']), name=i,mode='lines',line_shape='spline',\n 
line=dict(color=dados_paises.loc[dados_paises['Country/Region']==i,'Color'].values[0],width=dados_paises.loc[dados_paises['Country/Region']==i,'Width'].values[0])))\n\n    fig.update_layout(paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)', title='Variação da velocidade de progressão de contaminação para países selecionados')\n\n    fig.update_xaxes(title_text='# Dias desde a confirmação de 50 casos', showline=True, linewidth=1, linecolor='rgb(128,128,128)',showgrid=True, gridwidth=0.5, gridcolor='rgb(240,240,240)')\n    fig.update_yaxes(title_text='# dias para dobrar número de casos', showline=True, linewidth=1, linecolor='rgb(128,128,128)',showgrid=True, gridwidth=0.5, gridcolor='rgb(240,240,240)')\n\n    chart = py.plot(\n        fig,\n        show_link=False,\n        output_type='div',\n        include_plotlyjs=False,\n        auto_open=False,\n    )\n    \n    return chart\n\ndef acumulo_progressao_confirmados(base,lista_paises):\n\n    base_analise = curva_evolucao_confirmados(base,lista_paises)\n\n    m = int((5 * round(base_analise['var_dates'].max()/5))/5) + 1\n\n    dias = [0] + list(range(5, (m * 5)+1, 5))\n\n    base_análise_2 = base_analise.loc[base_analise['var_dates'].isin(dias)]\n\n    base_analise_3 = base_análise_2.pivot(index='Country/Region',columns='var_dates',values='value')\n\n    base_analise_3.columns = ['casos_'+str(i) for i in list(base_analise_3.columns)]\n\n    cols_antes = len(base_analise_3.columns)\n\n    for i in range(0,len(list(base_analise_3.columns))):\n        #base_analise_3[list(base_analise_3.columns)[i+1] + '_' + str(dias[i])] = base_analise_3.iloc[:,i+1] / base_analise_3.iloc[:,i]\n        base_analise_3[str(dias[i+1])] = base_analise_3.iloc[:,i+1] / base_analise_3.iloc[:,i]\n\n    #base_analise_3['0'] = 1\n\n    base_analise_3 = base_analise_3[list(base_analise_3.columns[cols_antes:])].reset_index().melt(id_vars=['Country/Region'])\n\n    base_analise_3['variable'] = base_analise_3['variable'].astype(int)\n\n    base_analise_3 = base_analise_3.sort_values(by=['Country/Region','variable'])\n\n    base_analise_3['value'] = base_analise_3['value'].fillna(0)\n\n    base_analise_3 = base_analise_3.groupby(by=['Country/Region','variable']).sum().groupby(level=[0]).cumprod().reset_index()\n\n    maximo_valor = base_analise_3.groupby('Country/Region')['value'].max().reset_index()\n\n    maximo_valor.rename(columns={'value':'value_max'},inplace=True)\n\n    aux_max = base_analise_3.merge(maximo_valor,on='Country/Region',how='left')\n\n    aux_max1 = aux_max.loc[(aux_max['value']<aux_max['value_max'])]\n\n    aux_max2 = aux_max.loc[aux_max['value']>=aux_max['value_max']]\n\n    base_analise_4 = pd.concat([aux_max1,aux_max2.groupby('Country/Region').head(1)]).sort_values(by=['Country/Region','variable'])\n\n    base_analise_4.drop(columns='value_max',inplace=True)\n\n    coloca_zero = pd.DataFrame(data={'Country/Region':list(base_analise_4['Country/Region'].unique())})\n\n    coloca_zero['variable'] = 0\n    coloca_zero['value'] = 1\n\n    base_analise_4 = pd.concat([base_analise_4,coloca_zero])\n\n    base_analise_4 = base_analise_4.sort_values(by=['Country/Region','variable'])\n    \n    base_analise_4.columns = ['Country/Region', 'var_dates', 'value']\n    \n    return base_analise_4\n\ndef plot_acumulo_progressao_confirmados(dados_graf,dados_paises,plot_china=0):\n    fig = go.Figure()\n    \n    dados_graf = dados_graf.merge(dados_paises[['Country/Region','Order']].drop_duplicates(),on='Country/Region').sort_values(by=['Order','var_dates'])\n    \n    dados_graf = dados_graf.loc[dados_graf['var_dates']<=dados_graf.loc[dados_graf['Country/Region']!='China','var_dates'].max()]\n    \n    if plot_china==0:\n        dados_graf = dados_graf.loc[~dados_graf['Country/Region'].isin(['China'])]\n\n    
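# A toy illustration of the core idea in acumulo_progressao_confirmados()
# above: sample the case curve every 5 days, take the ratio of each sample to
# the previous one, then chain those ratios with a cumulative product so each
# point reads as "x times the day-0 value". The numbers below are invented
# purely for illustration.
import pandas as pd

cases = pd.Series([50, 120, 300, 540], index=[0, 5, 10, 15], name='cases')
growth = cases / cases.shift(1)   # 5-day growth factors: NaN, 2.4, 2.5, 1.8
growth.iloc[0] = 1.0              # day 0 is the reference point
relative = growth.cumprod()       # 1.0, 2.4, 6.0, 10.8 times the day-0 cases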
for i in list(dados_graf['Country/Region'].unique()):\n fig.add_trace(go.Scatter(x=list(dados_graf.loc[dados_graf['Country/Region']==i,'var_dates']), \n y=list(dados_graf.loc[dados_graf['Country/Region']==i,'value']), name=i,mode='lines',line_shape='spline',\n line=dict(color=dados_paises.loc[dados_paises['Country/Region']==i,'Color'].values[0],width=dados_paises.loc[dados_paises['Country/Region']==i,'Width'].values[0])))\n\n fig.update_layout(paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)', title='Progressão acumulada de contaminação em relação ao ponto 0 para países selecionados')\n\n fig.update_xaxes(title_text='# Dias desde a confirmação de 50 casos', showline=True, linewidth=1, linecolor='rgb(128,128,128)',showgrid=True, gridwidth=0.5, gridcolor='rgb(240,240,240)')\n fig.update_yaxes(title_text='Número de vezes em relação ao ponto 0, acumulado', showline=True, linewidth=1, linecolor='rgb(128,128,128)',showgrid=True, gridwidth=0.5, gridcolor='rgb(240,240,240)')\n\n chart = py.plot(\n fig,\n show_link=False,\n output_type='div',\n include_plotlyjs=False,\n auto_open=False,\n )\n \n return chart\n\ndef projecao_brasil(base,lista_paises,lista_paises2):\n base_analise = curva_evolucao_confirmados(base,lista_paises)\n base_acumulo = acumulo_progressao_confirmados(base,lista_paises)\n\n real_brasil = base_analise.loc[base_analise['Country/Region']=='Brazil']\n\n base_acumulo = base_acumulo.loc[base_acumulo['Country/Region'].isin(['Brazil']+lista_paises2)]\n\n aux_acumulo = base_acumulo.pivot(index='var_dates',columns='Country/Region',values='value').reset_index()\n\n lista_pontos = list(aux_acumulo['var_dates'].unique())\n\n ponteiro = [i for i,x in enumerate(lista_pontos) if x == aux_acumulo.loc[aux_acumulo['Brazil'].isna()].head(1)['var_dates'].values[0]][0]\n\n aux_ajusta_previsao = base_acumulo.loc[base_acumulo['var_dates']==(lista_pontos[ponteiro-1]),['Country/Region','value']]\n\n aux_ajusta_previsao.columns=['Country/Region','value_ajust']\n\n base_acumulo_2 = base_acumulo.merge(aux_ajusta_previsao,on=['Country/Region'])\n\n base_acumulo_2['value_new'] = base_acumulo_2['value']/base_acumulo_2['value_ajust']\n\n base_projecao = real_brasil.loc[real_brasil['var_dates']==lista_pontos[ponteiro-1],'value']\n\n base_acumulo_2['casos_projecao'] = base_acumulo_2['value_new'] * int(base_projecao)\n\n base_acumulo_3 = base_acumulo_2.pivot(index='var_dates',columns='Country/Region',values='casos_projecao').reset_index()\n\n base_acumulo_3 = base_acumulo_3.loc[base_acumulo_3['var_dates']<=30]\n\n aux_final = pd.DataFrame(pd.concat([base_acumulo_3[['var_dates']],real_brasil[['var_dates']]])['var_dates'].unique())\n\n aux_final.columns=['var_dates']\n\n aux_final = aux_final.sort_values(by='var_dates').merge(base_acumulo_3[['var_dates']+lista_paises2],on='var_dates',how='left').merge(real_brasil[['var_dates','value']],on='var_dates',how='left')\n\n aux_final.rename(columns={'value':'Brazil'},inplace=True)\n\n aux_final.loc[aux_final['var_dates'] matrix[i][j]:\n min_elem = matrix[i][j]\n return min_elem\n\ndef print_matrix(matrix,row,column):\n print(\"Матрица А: \")\n for i in range(row):\n for j in range(column):\n print(str(matrix[i][j]) + \" \",end=\"\")\n print()\n\ndef max_element_diag(matrix,row,column):\n min_count = 0\n if row < column:\n min_count = row\n else:\n min_count = column\n max_elem = matrix[0][0]\n for i in range(min_count):\n if max_elem < matrix[i][i]:\n max_elem = matrix[i][i]\n return max_elem\n\ndef sum_number(number):\n a = 0\n while number>0:\n a,number = 
a+number%10,number//10\n return a\n\ndef words(input_string):\n buffer = \"\"\n list_words = []\n for i in input_string:\n if i.isalnum():\n buffer += i\n elif i == \" \" and buffer == \"\":\n continue\n elif i== \" \":\n list_words.append(buffer)\n buffer = \"\"\n if buffer != \"\":\n list_words.append(buffer)\n return list_words\n\ndef len_string(string):\n count = 0\n for i in string:\n count += 1\n return count\n\ndef max_word(list_words):\n a = list_words[0]\n for i in list_words:\n if len_string(a) < len_string(i):\n a = i\n return a\n\n\nprint(\"Введите количество строк и столбцов матрицы А\")\nrow = int(input(\"Количество строк: \"))\ncolumn = int(input(\"Количество столбцов: \"))\nmatrix = [[0 for x in range(column)] for x in range(row)] # Initialization matrix\n\nfor i in range(row):\n for j in range(column):\n matrix[i][j]=int(input(\"Matrix[\" + str(i) + \"][\" + str(j) + \"]=\"))\n\nx = min_element(matrix,row,column)\nprint(\"Минимальный элемент: \" + str(x))\n\nprint_matrix(matrix,row,column)\ny = max_element_diag(matrix,row,column)\nprint(\"Сумма цифр максимального числа на диагонали (\" + str(y) + \") : \" + str(sum_number(y)))\n\ninput_string = input(\"Введите строку: \")\nlist_words = words(input_string)\n\nprint(\"Самое длинное слово: \" + max_word(list_words))\n\n","sub_path":"test/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"167678472","text":"# -*- coding: utf8 -*-\r\nfrom django import forms\r\nfrom django.utils.safestring import mark_safe\r\n\r\nDEFAULT_WIDTH = '758'\r\nDEFAULT_HEIGHT = 600\r\n\r\n\r\nclass LocationWidget(forms.widgets.Widget):\r\n def __init__(self, *args, **kw):\r\n self.map_width = kw.get(\"map_width\", DEFAULT_WIDTH)\r\n self.map_height = kw.get(\"map_height\", DEFAULT_HEIGHT)\r\n\r\n super(LocationWidget, self).__init__(*args, **kw)\r\n self.inner_widget = forms.widgets.HiddenInput()\r\n\r\n def render(self, name, value, *args, **kwargs):\r\n if isinstance(value, unicode):\r\n a, b = value.split(',')\r\n else:\r\n a, b = (19.701706, -101.195658,)\r\n lat, lng = float(a), float(b)\r\n\r\n js = '''\r\n\r\n\r\n\r\n\r\n\r\n ''' % dict(name=name, lat=lat, lng=lng)\r\n html = self.inner_widget.render(\"%s\" % name, \"%f,%f\" % (lat, lng), dict(id='id_%s' % name))\r\n html += \"
\" % (name,\r\n self.map_height)\r\n\r\n return mark_safe(html+js)\r\n\r\n\r\nclass LocationField(forms.Field):\r\n widget = LocationWidget\r\n widget.allow_tags = True\r\n\r\n def clean(self, value):\r\n if isinstance(value, unicode):\r\n a, b = value.split(',')\r\n else:\r\n a, b = value\r\n\r\n lat, lng = float(a), float(b)\r\n return \"%f,%f\" % (lat, lng)","sub_path":"cms/location.py","file_name":"location.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"131068470","text":"# CMPT 120L 113 \n# Joseph McDonough \n# 3 Dec 2018\n# Version 1\n###\n\n##Version 1.0 Commit 3\n#Added injured Boolean. Player has a 10% chance to get injured at any given point when moving between locations\n#Injured cause probability of survival to go down but by using some medical supplies found around the map, they can\n#get their health back up\n#Put injured() atop each locale call so before the reach their new destination, the chance to get hurt occurs\n#Updated updateGame() to account for the use of meds\n##\n\nfrom random import randint\nfrom Locale import Locale\nfrom Player import Player\n\nheader = \"\\n~~~~~~~~~~~~\\n\"\ntitle = header + \" LAST STAND\" + header\nownership = \"\\nCopyright (c) 2018 Joseph McDonough, Joseph.McDonough1@marist.edu\"\nglobal moveLimit\nmoveLimit=randint(17,20)\nitemFound = \"ITEM FOUND: \"\nmapFound = False\nkeyFound = False\nvitaminsUsed = False\nuseableItems = [] #Need this because even when an item has 0 uses, I want it in the inventory, but can't let it be used\ninjured = False\nplayer = Player(\" \",\" \")\n\n\n#locales\nhome = Locale(\"apartment\", (\"\\nYou arrive back at home and now have more resources. You are still limited and decide \"\n \"to go back and search in the morning. You feel good and have confidence that you'll be able to \"\n \"outlast the quarantine and make it home. You are still patiently waiting to hopefully stumble upon \"\n \"your neighbor's key...\"),1,3)\n\nstreet = Locale(\"street\",(\"\\nYou don't really know where to go and so explore the streets. You discover a dead body \"\n \"that seems to have been there for a few days. You notice something shiny sticking out of the breast \"\n \"pocket. You take it out and notice it is a key. There are a million places where this can go but you take it \"\n \"regardless...\"), 5,10)\nstreet.addItem(\"Unknown key\",0)\n\nsubway = Locale(\"subway station\",(\"\\nYou return to the subway station. This is the place where you woke up \"\n \"that first night. It appears as if the tracks haven't been used in days and its probably going \"\n \"to stay that way. You look around and do not find anything of much use...\"), -5,-2)\n \nsupermarket = Locale(\"supermarket\",(\"\\nYou spot this massive store in the distance with all of the lights out. \"\n \"You venture inside and notice that this was the local supermarket. You rummage through the \"\n \"store gathering all the food you can find, your bag gets full and you head back out...\"), 3,7)\nsupermarket.addItem(\"2 tins of spam and bottled water\", 0)\n\ngeneralStore = Locale(\"general store\",(\"\\nAnother store has its lights on in the distance and you go to investigate. \"\n \"Luckily for you, this store seems like it used to sell everything. The glass on the door is \"\n \"long gone and you walk in and start scavenging. You notice seemingly new footprints in the \"\n \"snow but pay no mind to it. 
In the store is basic supplies and medicine that you should take \"\n \"back to your house...\"), 5,10)\ngeneralStore.addItem(\"Antibiotics and a medkit\", 2)\n\nclothingStore = Locale(\"clothing store\", (\"\\nYou head out and find a clothing store and they have a lot of clothes, \"\n \"but nothing you need. You decide to take what you can and see if you can fashion your \"\n \"own clothes back home. As you make your way out, a gang appears a few houses down, \"\n \"they spot you and they start to ambush you. You should try to run home...\"), 1,5)\nclothingStore.addItem(\"Two oversized t-shirts\", 0)\n\nhospital = Locale(\"hospital\",(\"\\nYou stumble upon the abandoned hospital. This place was overrun by the infected. \"\n \"You cautiously walk around. The first floor is vacant and there is nothing of use. You hear \"\n \"noises above you. It is not worth the risk considering you aren't too desparate yet and there \"\n \"probably is nothing anyways. You leave this dangerous place behind...\"), -5,-2)\n\narmory = Locale(\"armory\", (\"\\nAfter strolling about, looking for stuff, you spot a little store that you've never \"\n \"seen. The glass doors are shattered and so you walk inside. You see the sign for London \"\n \"Armoury Company and notice the empty gun racks on the walls. After a little bit of exploring, \"\n \"you don't find any big weapons but you do find a sidearm in one of the drawers... \"), 7,10)\narmory.addItem(\"Pistol with 5 bullets in the magizine\",0)\n\nwembley = Locale(\"wembley\", (\"\\nYou are walking and you notice the arch is right in front of you. You reach Wembley \"\n \"Stadium, the center of English football. You remember coming to the last year's FA Cup \"\n \"Finals and Tottenham finally lifting a trophy. These memories are bittersweet because life has \"\n \"not been the same. As you start to regret coming here, you notice a large piece of paper on the \"\n \"floor. You pick it up and notice it is a map of the city. Now you feel like you have benefitted \"\n \"from this trip and leave happy...\"), 7,10)\nwembley.addItem(\"Map\",0)\n\nwarehouse = Locale(\"warehouse\", (\"\\nThis building is definitely going to be useless. You walk inside of \"\n \"this massive building that looks like it caught on fire. Inside are many cardboard boxes, \"\n \"charred and consumed. After further investigation, you make out the Amazon logo on one of \"\n \"the boxes. This must have been their warehouse. You walk around for a little longer and \"\n \"cannot seem to find anything of use to you in your current state. Back out you go...\"), -5,-2)\n\nlockedRoom = Locale(\"locked room\", (\"\\nYou have FINALLY found the key to open your neighbor's apartment. Hopefully that body wasn't his... \"\n \"Regardless, you enter the apartment and scavenge for whatever you can find. Everything is empty or \"\n \"rotten except for some vitamins in one of the cupboards. You take those back with you to your place...\\n\"\n \"ITEM FOUND: VITAMINS \\nITEM ACQUIRED!\"), 0,0)\nlocaleList = [home, street, subway, supermarket, generalStore, clothingStore, hospital, armory, wembley, warehouse]\n\ndef getInjured(): #function to determine if the user fell when moving between locations\n chanceOfInjury = int(randint(1,40)) #there are 4 instances in which the user can get hurt, wanted 4 different parts\n if (chanceOfInjury == 1 and not player.injured): #dont want to let the player stack up injuries\n player.injured = True\n print(\"\\nYou fell on some black ice and slice your left arm upon impact. 
It's not too bad but you need to find some medical supplies...\")\n print(probOfSurv(int(randint(-5,-2))))\n elif (chanceOfInjury == 11 and not player.injured): #Did not want 1,2,3,4 to be the chances because why not, \n player.injured = True #so each number ending in 1 represents a different body part\n print(\"\\nDown you go on some ice you failed to spot. You cut your right arm as you land and need some medical supplies to patch it up...\")\n print(probOfSurv(int(randint(-5,-2))))\n elif (chanceOfInjury == 21 and not player.injured):\n player.injured = True\n print(\"\\nYou tripped on some trash and a rusty screw punctured your left leg. Hopefully it is not infected or maybe some antibiotics would help...\")\n print(probOfSurv(int(randint(-5,-2))))\n elif (chanceOfInjury == 31 and not player.injured):\n player.injured = True\n print(\"\\nYou walked too close to the building on your right and a piece of stray glass sliced your right leg. You should be fine but a bandage would be nice...\")\n print(probOfSurv(int(randint(-5,-2))))\n \ndef printInventory():\n print(\"You currently have: \")\n for x in player.inventory:\n if (x == \"Antibiotics and a medkit\"): #general store is the only place that gives an item with limited uses \n print(\"\\n\\t\"+u\"\\u00BB\", x, \". Uses remaining: \" + str(generalStore.uses))\n elif (x == \"Vitamins\" and not vitaminsUsed):\n print(\"\\n\\t\"+u\"\\u00BB\", x, \". Uses remaining: \" + str(1))\n elif (x == \"Vitamins\" and vitaminsUsed):\n print(\"\\n\\t\"+u\"\\u00BB\", x, \". Uses remaining: \" + str(0))\n else:\n print(\"\\n\\t\"+u\"\\u00BB\", x)\n\ndef hasInList(listName, elem):\n for x in listName:\n if (x==elem):\n return True\n return False\n\ndef take(locale): \n print(itemFound + str(locale.item)) #prints the item found message regardless of if an item is present or not\n if(locale.item!=None): #as long as there is an item at that location, it will enter the while loop asking the user if they want to pick up the available items\n tempBoolean = True\n else:\n print(\"There are no items to be picked up here.\") #if no items are to be picked up, user is notified\n tempBoolean = False\n while tempBoolean:\n takingIt = input(\"Would you like to take the item found? (Y/N) \") #prompts user to pick up or leave item\n if(takingIt.lower().strip()==\"y\"):\n player.inventory.append(locale.item) #takes the item and puts it into the user's inventory\n if(locale.item==\"Map\"): # if the map is what is found, and then accepted, the user now has access to the map command\n global mapFound\n mapFound = True\n if(locale.name==generalStore.name):\n useableItems.append(locale.item)\n if(locale.name==street.name):\n useableItems.append(locale.item)\n global keyFound\n keyFound = True\n locale.item=None #clears the location of the items --> locale dictionary item now holds None\n tempBoolean = False #exits loop\n print(\"ITEM ACQUIRED!\") #lets user know they successfully picked up a new item\n elif(takingIt.lower().strip()==\"n\"):\n break #if they don't want the item, nothing happens\n else:\n print('You are not entering a valid command. 
Please type \"Y\" or \"N\".')\n \ndef goHome():\n getInjured()\n player.locationDescription = home.description\n player.locationName = home.name\n print(player.locationDescription)\n take(home)\n if(not home.hasBeen):\n print(probOfSurv(home.chance))\n home.hasBeen = True\n player.movesMade +=1\n \ndef goStreet():\n getInjured()\n player.locationDescription = street.description\n player.locationName = street.name\n print(player.locationDescription)\n take(street)\n if(not street.hasBeen):\n print(probOfSurv(street.chance))\n street.hasBeen = True\n player.movesMade +=1\n \ndef goSubway():\n getInjured()\n player.locationDescription = subway.description\n player.locationName = subway.name\n print(player.locationDescription)\n take(subway)\n if(not subway.hasBeen):\n print(probOfSurv(subway.chance))\n subway.hasBeen = True\n player.movesMade +=1\n \ndef goSupermarket():\n getInjured()\n player.locationDescription = supermarket.description\n player.locationName = supermarket.name\n print(player.locationDescription)\n take(supermarket)\n if(not supermarket.hasBeen):\n print(probOfSurv(supermarket.chance))\n supermarket.hasBeen = True\n player.movesMade +=1\n \ndef goGeneralStore():\n getInjured()\n player.locationDescription = generalStore.description\n player.locationName = generalStore.name\n print(player.locationDescription)\n take(generalStore)\n if(not home.hasBeen):\n print(probOfSurv(generalStore.chance))\n generalStore.hasBeen = True\n player.movesMade +=1\n \ndef goClothingStore():\n getInjured()\n player.locationDescription = clothingStore.description\n player.locationName = clothingStore.name\n print(player.locationDescription)\n take(clothingStore)\n if(not clothingStore.hasBeen):\n print(probOfSurv(clothingStore.chance))\n clothingStore.hasBeen = True\n player.movesMade +=1\n \ndef goHospital():\n getInjured()\n player.locationDescription = hospital.description\n player.locationName = hospital.name\n print(player.locationDescription)\n take(hospital)\n if(not hospital.hasBeen):\n print(probOfSurv(hospital.chance))\n hospital.hasBeen = True\n player.movesMade +=1\n \ndef goArmory():\n getInjured()\n player.locationDescription = armory.description\n player.locationName = armory.name\n print(player.locationDescription)\n take(armory)\n if(not armory.hasBeen):\n print(probOfSurv(armory.chance))\n armory.hasBeen = True\n player.movesMade +=1\n\ndef goWembley():\n getInjured()\n player.locationDescription = wembley.description\n player.locationName = wembley.name\n print(player.locationDescription)\n take(wembley)\n if(not wembley.hasBeen):\n print(probOfSurv(wembley.chance))\n wembley.hasBeen = True\n player.movesMade +=1\n\ndef goWarehouse():\n getInjured()\n player.locationDescription = warehouse.description\n player.locationName = warehouse.name\n print(player.locationDescription)\n take(warehouse)\n if(not warehouse.hasBeen):\n print(probOfSurv(warehouse.chance))\n warehouse.hasBeen = True\n player.movesMade +=1\n\ndef goLockedRoom(): #Can't fall on ice when moving between rooms in a building\n player.locationDescription = lockedRoom.description\n player.locationName = lockedRoom.description\n print(player.locationDescription)\n if(not lockedRoom.hasBeen):\n lockedRoom.hasBeen = True\n lockedRoom.updateDescription(\"You have already looted this room. Time to move on...\")\n home.updateDescription(\"\\nYou arrive back at home and now have more resources. You are still limited and decide \"\n \"to go back and search in the morning. 
You feel good and have confidence that you'll be able to \"\n \"outlast the quarantine and make it home. It also feels good knowing you finally got into that apartment...\" )\n\ndef updateGame(userInput): \n global vitaminsUsed\n direction = userInput\n cantGo = (\"\\n You head \" + direction + \" and notice the quarantine zone border. You cannot continue in that direction.\")\n if (userInput == \"use key\" and player.locationName == home.name and keyFound==True and not lockedRoom.hasBeen):\n goLockedRoom()\n player.inventory.append(\"Vitamins\")\n useableItems.append(\"Vitamins\")\n goHome()\n elif (userInput == \"use key\" and player.locationName == home.name and keyFound==True and lockedRoom.hasBeen):\n print(\"You have already opened the room and collected everything inside, time to move on.\")\n elif (userInput == \"use key\" and player.locationName != home.name and keyFound == True):\n print(\"You look around and see nowhere for the key to be used...\")\n elif (userInput == \"use key\" and player.locationName == home.name and keyFound == False):\n print(\"You still do not have the correct key to open the door...\")\n elif (userInput == \"use key\" and player.locationName != home.name and keyFound == False):\n print(\"What key?\")\n elif (userInput == \"use vitamins\" and hasInList(useableItems,\"Vitamins\")):\n print(\"Vitamins successfully used\")\n print(probOfSurv(int(randint(5,10))))\n vitaminsUsed = True\n useableItems.remove(\"Vitamins\")\n elif (userInput == \"use vitamins\" and not hasInList(useableItems, \"Vitamins\")):\n print(\"You do not have any more vitamins to use. Keep looking...\")\n elif (userInput == \"use meds\" and player.injured and hasInList(useableItems, \"Antibiotics and a medkit\")):\n print(\"You have successfully healed your wound...\")\n print(probOfSurv(int(randint(5,8))))\n player.injured = False\n if (generalStore.uses>1):\n generalStore.uses -=1 #subtracts the amount of uses left on the meds\n elif (generalStore.uses ==1):\n generalStore.uses -=1\n useableItems.remove(\"Antibiotics and a medkit\") #There are now 0 uses left, so now the user cannot use them\n elif (userInput == \"use meds\" and not player.injured and hasInList(useableItems, \"Antibiotics and a medkit\")):\n print(\"Luckily for you, there are no injuries and the medical supplies won't be of any use to you right now...\")\n elif (userInput == \"use meds\" and player.injured and not hasInList(useableItems, \"Antibiotics and a medkit\")):\n print(\"You do not have any medical supplies to help you with this injury. Keep looking before it gets worse...\")\n elif (userInput == \"use meds\" and not player.injured and not hasInList(useableItems, \"Antibiotics and a medkit\")):\n print(\"What medical supplies? 
What wounds?\")\n \n elif(direction==\"north\"): #Can't go from wembley, clothing, armory, warehouse\n if(player.locationName==home.name):\n goSubway()\n elif(player.locationName==street.name):\n goClothingStore()\n elif(player.locationName==supermarket.name):\n goHome()\n elif(player.locationName==generalStore.name):\n goArmory()\n elif(player.locationName==hospital.name):\n goGeneralStore()\n elif(player.locationName==subway.name):\n goWembley()\n else:\n print(cantGo)\n elif(direction==\"south\"): #Can't go from streets, supermarket, hospital, warehouse\n if(player.locationName==home.name):\n goSupermarket()\n elif(player.locationName==subway.name):\n goHome()\n elif(player.locationName==generalStore.name):\n goHospital()\n elif(player.locationName==clothingStore.name):\n goStreet()\n elif(player.locationName==armory.name):\n goGeneralStore()\n elif(player.locationName==wembley.name):\n goSubway()\n else:\n print(cantGo)\n elif(direction==\"east\"): #Can't go from warehouse, general, hospital, wembley\n if(player.locationName==home.name):\n goGeneralStore()\n elif(player.locationName==street.name):\n goSupermarket()\n elif(player.locationName==subway.name):\n goArmory()\n elif(player.locationName==supermarket.name):\n goHospital()\n elif(player.locationName==clothingStore.name):\n goHome()\n elif(player.locationName==armory.name):\n goWarehouse()\n else:\n print(cantGo)\n elif(direction==\"west\"): #Can't go from wembley, subway, clothing, streets\n if(player.locationName==home.name):\n goClothingStore()\n elif(player.locationName==supermarket.name):\n goStreet()\n elif(player.locationName==generalStore.name):\n goHome()\n elif(player.locationName==hospital.name):\n goSupermarket()\n elif(player.locationName==armory.name):\n goSubway()\n elif(player.locationName==warehouse.name):\n goArmory()\n else:\n print(cantGo)\n else:\n print('You are not entering a valid input. Try again or type \"help\". ') #anything entered that is not in the directionHeading array will not be reconginzed and user has to input a new request\n pressEnter()\n \ndef introCharChoice(): #gets player name and gender, player chooses character type, intro is displayed\n print(title) #prints game title\n name = input(\"Welcome player, insert your name: \") #gets name\n gender = input(\"Thanks \" + name + \". Now select your gender: M/F \") #gets gender\n setDefault(name, gender)\n print(\"Now pick your character type:\\n\")\n options() #shows all the possible characters with the strength and weaknesses\n player.welcome() #Greets player before game starts\n print(\"You were born in Napa Valley, California and graduated from Lowell High School,\" # show game introduction part 1\n \" one of the best school in the bay area. You graduated top 10 in your class\"\n \" and have had a passion for \" +player.passion+\n \" ever since you were a little kid. Unfortunately for you, your\"\n \" parents owned a lot of land and wanted you to take over the family run\"\n \" vineyard. This lifestyle was not for you. Upon graduating you\"\n \" wanted to attend \"+player.college+ \", one of the best in your field,\"\n \" but your parents were against it. They said as long as you lived here, you\"\n \" would obey them and they would not fund you. After much consideration,\"\n \" you decided that you will pursue your own path. 
You did not have enough\"\n \" money to attend the school of your dreams, so you decided to try something\"\n \" new and go to London, England as you always desired to go there.\")\n pressEnter() # prompt the user\n print(\"\\nYou have been in London for 3 years, working \" + player.tempJob + # character introduction part 2\n \" in order to save money and hopefully eventually get a degree. \"\n \" You were getting close to getting enough money to start school when it happened.\"\n \" All of a sudden, an epidemic has hit the city. Slowly everyone around you is\"\n \" getting sick and you need to leave but all flights are grounded and the city\"\n \" is quarantined until further notice. Now you need to find a way to survive the\"\n \" frigid winter all alone...\\n\" + \"You awake to find yourself in complete darkness at a subway station. \"\n \"You must've spent the night there, but you don't know how you ended up here. You exit and the sky is clouded and snow is \"\n \"starting to fall. The power keeps flickering and you don't know where to begin, but you do remember where a lot of the shops are.\\n\"\n \"\\nYour starting survival probability is \" + str(player.score) + \"%.\")\n\ndef setDefault(name, gender):\n player.name = name\n player.gender = gender\n player.score = int(randint(5,25))\n player.locationName = subway.name #starting location is subway so assigning that here\n player.locationDescription = subway.description\n player.movesMade = 0\n global mapFound, keyFound, vitaminsUsed, useableItems\n mapFound = False\n keyFound = False\n vitaminsUsed = False\n player.injured = False\n useableItems = []\n for x in localeList:\n x.hasBeen = False\n street.addItem(\"Unknown key\",0)\n supermarket.addItem(\"2 tins of spam and bottled water\", 0)\n generalStore.addItem(\"Antibiotics and a medkit\", 2)\n clothingStore.addItem(\"Two oversized t-shirts\", 0)\n armory.addItem(\"Pistol with 5 bullets in the magizine\",0)\n wembley.addItem(\"Map\",0)\n\ndef takeInput(): #takes input and returns normalized input \n userInput = input('Would you like to go \"north\", \"south\", \"east\", or \"west\". Or type \"help\", \"map\", \"inventory\", '\n ' \"use\" followed by the item (see help), or \"quit\": ')\n normInput = userInput.strip().lower()\n return normInput\n\ndef helpMenu():\n print(\"\\n Choose the direction you would like to head:\"\n \"\\n\\t\"+u\"\\u2023\"+\"North makes your character head north. \\n\\t\"+u\"\\u2023\"+\"South makes your character go south.\"\n \"\\n\\t\"+u\"\\u2023\"+\"East makes your character go east. \\n\\t\"+u\"\\u2023\"+\"West makes your character go west.\"\n \"\\n\\t\"+u\"\\u2023\"+\"Map shows the map of the world (Only if map is found).\\n\\t\"+u\"\\u2023\"+\"Inventory shows \"\n \"what is currently in the users inventory\" \"\\n\\t\"+u\"\\u2023\"+'Use followed by \"key\", \"vitamins\" or \"meds\" will use said item.'\n \"\\n\\t\"+u\"\\u2023\"+\"Quit exits the game.\")\n\ndef renderGame():\n print(\"\\nMOVES MADE: \" + str(player.movesMade))\n print(\"LOCATION: \" + player.locationName.upper() + \". 
SURVIVAL PROBABILITY: \" + str(player.score) + \"%.\") \n\ndef options(): #prints the description for each of the available characters\n Player.display(\"\")\n choice = input(\"\\nEnter the number for the character you wish to play as: \") #Has player pick character\n if(choice==\"1\"):\n player.assignRole(1)\n elif(choice==\"2\"):\n player.assignRole(2)\n elif(choice==\"3\"):\n player.assignRole(3)\n elif(choice==\"4\"):\n player.assignRole(4)\n else:\n print(\"Please enter a number that is listed above\")\n options() \n\ndef pressEnter(): #function for prompting player to continue\n input(\"\\n\")\n\ndef probOfSurv(chance): #function for printing probability of surviving\n oldSurv = player.score\n player.score +=chance\n if oldSurv>player.score:\n incOrDec = \"decreased\"\n else:\n incOrDec = \"increased\"\n survMsg = (\"\\nYour probability of survival \" + incOrDec + \" to \" + str(player.score)+ \"%.\")\n return survMsg\n\ndef map():\n print('''\n Wembley\n |\n |\n Subway -------- Armory ---- Warehouse\n | | \n | | \n Clothing ----- Home -------- General\n | | | \n | | | \n Streets --- Supermarket ---- Hospital \n ''')\n\ndef gameLoop(): #game loop to take input and update game\n global userInput\n while player.movesMade55):\n print(\"Congratulations! You have gathered enough materials and it looks like you should be able to survive long enough to leave.\")\n else:\n print(\"\\nYour time has expired. It seems as if you are stuck here forever. Better luck next time!\")\n print(ownership) # show credits\n\ndef playAgain():\n global player\n again = input('\\n Would you like to play again. Enter \"Y\" or \"N\": ') \n if (again.strip().lower() == \"y\"):\n player.movesMade = 0\n player.inventory = []\n player.score = 0\n main()\n elif (again.strip().lower() == \"n\"):\n pass\n else:\n print('You are not entering a valid input. Please try again and enter only \"Y\" or \"N\".')\n playAgain()\n \ndef main(): #main function to run game\n introCharChoice()\n gameLoop()\n ending()\n playAgain()\n\n# runs the game\nmain()\n","sub_path":"LastStand/lastStand.py","file_name":"lastStand.py","file_ext":"py","file_size_in_byte":27819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"169649100","text":"import discord,re\nfrom discord.ext import commands\ndef server_check(ctx):\n\treturn ctx.message.server.id == \"448081955221798923\"\n\nclass submit():\n\tdef __init__(self,bot):\n\t\tself.bot=bot\n\n\t@commands.command(pass_context=True)\n\t@commands.check(server_check)\n\tasync def submit(self,ctx):\n\t\tmsg = \"Reply to this conversation with your server invite and (brief) description. 
Read #how-to-list before applying.\"\n\t\tinvalid=\"Your reply was late or you did not provide a valid discord invite (formatted as https://discord.gg/). Use the !submit command again\"\n\t\tconfirm=\"Your application has been submitted and is being looked over by the mods, expect a reply in a few days\"\n\t\tawait self.bot.send_message(ctx.message.author,msg)\n\t\tdef check(message):\n\t\t\treturn True\n\t\tregex = re.compile(r\"https://(discord\\\\.gg/[^\\\\s]*)\")\n\t\treply = await self.bot.wait_for_message(author = ctx.message.author,check = check)\n\t\tvar = regex.search(reply.content)\n\t\tif var is None:\n\t\t\t\n\t\t\tawait self.bot.send_message(ctx.message.author,invalid)\n\t\telse:\n\t\t\t\n\t\t\tthing = await self.bot.get_invite(var.group())\n\t\t\tif thing is not None:\n\t\t\t\tsay = \"Sent by <@\" + ctx.message.author.id +\">\"\n\t\t\t\tawait self.bot.send_message(discord.Object(\"587466715407843328\"),reply.content)\n\t\t\t\tawait self.bot.send_message(discord.Object(\"587466715407843328\"),say)\n\t\t\t\tawait self.bot.send_message(ctx.message.author,confirm)\n\n\t\t\telse:\n\t\t\t\tawait self.bot.send_message(ctx.message.author,invalid)\n\t\t\t\t\n\t@commands.command(pass_context=True)\n\tasync def regex(self,ctx,msg:str):\n\t\tregex = re.compile(r'discord\\\\.gg/[^\\\\s]*')\n\t\tvar = regex.search(msg)\n\t\tif var is None:\n\t\t\tawait self.bot.say(\"none\")\n\t\telse:\n\t\t\tawait self.bot.say(var.group())\n\t\t\ndef setup(bot):\n\tbot.add_cog(submit(bot))","sub_path":"modules/submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"649532515","text":"#7-3, Python Crash Course, Multiples of Ten\r\n\r\n#Stores the prompt for user input\r\nprompt = \"Give me a number, and I will tell you if it is a multiple of 10: \"\r\n\r\n#Stores user's number in variable\r\nnumber = int(input(prompt))\r\n\r\n#Checks if number is a multiple of ten and displays a message\r\nif number % 10 == 0:\r\n print(\"\\nThe number \" + str(number) + \" is indeed a multiple of 10. \")\r\nelse:\r\n print(\"\\nThe number \" + str(number) + \" is not a multiple of 10. 
\")\n","sub_path":"Input/multiples_of_ten_73.py","file_name":"multiples_of_ten_73.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"377318557","text":"#!/usr/bin/env python\n\nfrom carddeck import CardDeck\n\n# class base class\nclass JokerDeck(CardDeck):\n\n def _create_deck(self):\n super()._create_deck() # call parent method\n self._cards.append(('First', 'Joker'))\n self._cards.append(('Second', 'Joker'))\n","sub_path":"jokerdeck.py","file_name":"jokerdeck.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"449874548","text":"\"\"\"tests for linear algebra utilities\"\"\"\nimport numpy as np\nimport pytest\nfrom scipy.linalg import LinAlgError\n\nfrom pysip.utils.math import diff_upper_cholesky, nearest_cholesky\n\n\n@pytest.mark.parametrize(\"N\", [5, 10, 25, 50])\ndef test_nearest_upper_cholesky(N):\n\n S = np.cov(np.random.randn(N, 2 * N))\n S = (S + S.T) / 2.0\n Is = np.eye(S.shape[0])\n jitter = 1e-10\n while jitter < 1.0:\n try:\n S += jitter * Is\n upper_chol = np.linalg.cholesky(S).T\n break\n except LinAlgError:\n jitter *= 10.0\n\n nearest_upper_chol = nearest_cholesky(S)\n\n assert np.allclose(upper_chol.T @ upper_chol, S)\n assert np.allclose(nearest_upper_chol.T @ nearest_upper_chol, S)\n\n\n@pytest.mark.parametrize(\"N\", [5, 10, 25, 50])\ndef test_diff_upper_cholesky(N):\n S = np.cov(np.random.randn(N, 2 * N))\n dS = np.cov(np.random.randn(N, 2 * N))\n S = (S + S.T) / 2.0\n Is = np.eye(S.shape[0])\n jitter = 1e-10\n while jitter < 1.0:\n try:\n S += jitter * Is\n upper_chol = np.linalg.cholesky(S).T\n break\n except LinAlgError:\n jitter *= 10.0\n\n def fd_upper_cholesky(S, dS):\n hh = 1e-5\n R1 = np.linalg.cholesky(S - dS * hh / 2.0).T\n R2 = np.linalg.cholesky(S + dS * hh / 2.0).T\n return (R2 - R1) / hh\n\n dR_fd = fd_upper_cholesky(S, dS)\n\n R = nearest_cholesky(S)\n dR = diff_upper_cholesky(R, dS)\n\n assert np.allclose(upper_chol.T @ upper_chol, S)\n assert np.allclose(R.T @ R, S)\n assert np.allclose(dR, dR_fd)\n","sub_path":"tests/utils/test_math.py","file_name":"test_math.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"306197086","text":"\"\"\"Mancala, built on Norvig's Game class\nadapted to Python 3 in September 2016\n\n\"\"\"\n\nfrom utils import *\n# from Tkinter import *\nimport random, re, time\n\n\ncount = 0\ntesting = 0\nBigInitialValue = 1000000\n\n\ndef alphabeta_search(state, game, d=4, cutoff_test=None, eval_fn=None):\n \"\"\"Search game to determine best action; use alpha-beta pruning.\n This version cuts off search and uses an evaluation function.\"\"\"\n global count\n global testing\n global BigInitialValue\n\n player = game.to_move(state)\n count = 0\n starttime = time.time()\n\n\n def max_value(state, alpha, beta, depth):\n global count, testing\n if testing:\n print(\" \"* depth, \"Max alpha: \", alpha, \" beta: \", beta, \" depth: \", depth)\n if cutoff_test(state, depth):\n if testing:\n print(\" \"* depth, \"Max cutoff returning \", eval_fn(state))\n return eval_fn(state)\n v = -BigInitialValue\n succ = game.successors(state)\n count = count + len(succ)\n if testing:\n print(\" \"*depth, \"maxDepth: \", depth, \"Total:\", count, \"Successors: \", len(game.successors(state)))\n for (a, s) in succ:\n # Decide whether to call max_value or min_value, 
depending on whose move it is next.\n # Some games, such as Mancala, sometimes allow the same player to make multiple moves.\n if state.to_move == s.to_move:\n v = max(v, max_value(s, alpha, beta, depth+1))\n else:\n v = max(v, min_value(s, alpha, beta, depth+1))\n if testing:\n print(\" \"* depth, \"max best value:\", v)\n if v >= beta:\n return v\n alpha = max(alpha, v)\n return v\n\n def min_value(state, alpha, beta, depth):\n global count\n if testing:\n print(\" \"*depth, \"Min alpha: \", alpha, \" beta: \", beta, \" depth: \", depth)\n if cutoff_test(state, depth):\n if testing:\n print(\" \"*depth, \"Min cutoff returning \", eval_fn(state))\n return eval_fn(state)\n v = BigInitialValue\n succ = game.successors(state)\n count = count + len(succ)\n if testing:\n print(\" \"*depth, \"minDepth: \", depth, \"Total:\", count, \"Successors: \", len(game.successors(state)))\n for (a, s) in succ:\n # Decide whether to call max_value or min_value, depending on whose move it is next.\n # Some games, such as Mancala, sometimes allow the same player to make multiple moves.\n if state.to_move == s.to_move:\n v = min(v, min_value(s, alpha, beta, depth+1))\n else:\n v = min(v, max_value(s, alpha, beta, depth+1))\n if testing:\n print(\" \"*depth, \"min best value:\", v)\n if v <= alpha:\n return v\n beta = min(beta, v)\n return v\n\n def right_value(s, alpha, beta, depth):\n if s.to_move == state.to_move:\n return max_value(s, -BigInitialValue, BigInitialValue, 0)\n else:\n return min_value(s, -BigInitialValue, BigInitialValue, 0)\n\n # Body of alphabeta_search starts here:\n # The default test cuts off at depth d or at a terminal state\n cutoff_test = (cutoff_test or\n (lambda state,depth: depth>d or game.terminal_test(state)))\n eval_fn = eval_fn or (lambda state: game.utility(state, game.current_player))\n action, state = argmax(game.successors(state),\n # lambda ((a, s)): right_value(s, -BigInitialValue, BigInitialValue, 0))\n lambda a_s: right_value(a_s[1], -BigInitialValue, BigInitialValue, 0))\n stoptime = time.time()\n elapsed = stoptime - starttime\n print(\"Final count: \", count, \"Time: \", end=\",\")\n print(\" %.5f seconds\" % elapsed)\n return action\n\n#______________________________________________________________________________\n# Players for Games\n\n##def query_player(game, state):\n## \"Make a move by querying standard input.\"\n## game.display(state)\n## return num_or_str(raw_input('Your move? 
'))\n##\n##def random_player(game, state):\n## \"A player that chooses a legal move at random.\"\n## # Added state argument to legal_moves AJG 8/9/04\n## return random.choice(game.legal_moves(state))\n##\n##def alphabeta_player(game, state):\n## return alphabeta_search(state, game)\n##\n##def alphabeta_full_player(game, state):\n## return alphabeta_full_search(state, game)\n##\n##def alphabeta_depth1_player(game, state):\n## return alphabeta_search(state, game, 1)\n\nclass mancala_player:\n def __init__(self, name):\n self.name = name\n\n def calculate_utility(self, boardstate):\n return boardstate.default_utility()\n\n def alphabeta_parameters(self, boardstate, remainingTime):\n # This should return a tuple of (cutoffDepth, cutoffTest, evalFn)\n # where any (or all) of the values can be None, in which case the\n # default values are used:\n # cutoffDepth default is 4\n # cutoffTest default is None, which just uses cutoffDepth to\n # determine whether to cut off search\n # evalFn default is None, which uses your boardstate_utility_fn\n # to evaluate the utility of board states.\n return (4, None, None)\n\nclass mancala_player2:\n def __init__(self, name):\n self.name = name\n\n def calculate_utility(self, boardstate):\n return boardstate.default_utility2()\n\n def alphabeta_parameters(self, boardstate, remainingTime):\n # This should return a tuple of (cutoffDepth, cutoffTest, evalFn)\n # where any (or all) of the values can be None, in which case the\n # default values are used:\n # cutoffDepth default is 4\n # cutoffTest default is None, which just uses cutoffDepth to\n # determine whether to cut off search\n # evalFn default is None, which uses your boardstate_utility_fn\n # to evaluate the utility of board states.\n return (6, None, None)\n\ndef play_mancala(game=None,initialTime=600,\n player1=mancala_player(\"p1\"),player2=mancala_player2(\"p2\")):\n \"Play a 2-person, move-alternating Mancala game.\"\n # This is play_game with stuff added to keep track of time.\n game = game or Mancala()\n state = game.initial\n players = (player1, player2)\n # initialize the amount of time for each player. 
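Each call to alphabeta_search below is timed and the elapsed time is deducted from the moving player's clock; a player whose clock runs out loses. 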
Units are seconds.\n # 600 seconds is 10 minutes\n clocks = {player1: initialTime, player2: initialTime}\n previousPass = 0\n while True:\n player = players[state.to_move]\n game.current_player = player\n params = player.alphabeta_parameters(state, clocks[player])\n startTime = time.time()\n move = alphabeta_search(state, game, params[0], params[1], params[2])\n endTime = time.time()\n moveTime = endTime - startTime\n if moveTime > clocks[player]:\n print(\"Player\", player.name, \"took too much time and loses.\")\n # Should really just return some utility that reflects player losing.\n return \"Game Over!!!\"\n else:\n clocks[player] -= moveTime\n state = game.make_move(move, state)\n print(\"Time remaining player 1:\", clocks[player1], \"player 2:\", clocks[player2])\n print(\"Player: \", player.name, \"took move \", move, \"resulting in state:\")\n game.display(state)\n if game.terminal_test(state):\n P0Count = state.PlayerPieceCount(P0)\n P1Count = state.PlayerPieceCount(P1)\n print(\"Player: \", players[P0].name, \"piece count: \", P0Count)\n print(\"Player: \", players[P1].name, \"piece count: \", P1Count)\n if P0Count > P1Count:\n print(\"Player \", players[P0].name, \" WINS!!!\")\n elif P1Count > P0Count:\n print(\"Player \", players[P1].name, \" WINS!!!\")\n else:\n print(\"Game is a tie.\")\n return \"Game Over!!\"\n\nclass Game:\n \"\"\"A game is similar to a problem, but it has a utility for each\n state and a terminal test instead of a path cost and a goal\n test. To create a game, subclass this class and implement\n legal_moves, make_move, utility, and terminal_test. You may\n override display and successors or you can inherit their default\n methods. You will also need to set the .initial attribute to the\n initial state; this can be done in the constructor.\"\"\"\n\n def legal_moves(self, state):\n \"Return a list of the allowable moves at this point.\"\n abstract()\n\n def make_move(self, move, state):\n \"Return the state that results from making a move from a state.\"\n abstract()\n\n def utility(self, state, player):\n \"Return the value of this final state to player.\"\n abstract()\n\n def terminal_test(self, state):\n \"Return True if this is a final state for the game.\"\n return not self.legal_moves(state)\n\n def to_move(self, state):\n \"Return the player whose move it is in this state.\"\n return state.to_move\n\n def display(self, state):\n \"Print or otherwise display the state.\"\n print(state)\n\n def successors(self, state):\n \"Return a list of legal (move, state) pairs.\"\n m = [(move, self.make_move(move, state))\n for move in self.legal_moves(state)]\n return m\n\n# return [(move, self.make_move(move, state))\n# for move in self.legal_moves(state)]\n\n def __repr__(self):\n return '<%s>' % self.__class__.__name__\n\n\n#______________________________________________________________________________\n# Beginning of Mancala classes\n\n# Mancala board representation:\n# x1 x2 x3 x4 x5 x6\n# x0 y0\n# y1 y2 y3 y4 y5 y6\n#\n# Assume the named positions shown above, where y0 and x0 are the Mancalas for\n# players y and x respectively, and the remaining positions are the bins.\n# The board is stored as a list with the elements in the following order:\n# [y1, y2, y3, y4, y5, y6, y0,\n# x6, x5, x4, x3, x2, x1, x0]\n#\n# Notice that this ordering means we can just step through the list to find\n# contiguous bins.\n\n# give names to where particular positions appear in the internal list\nP0Mancala = 6\nP1Mancala = 13\nP0Start = 0\nP1Start = 7\n\n# other 
constants\nInitialPieceCount = 6\nP0 = 0\nP1 = 1\n\ndef opponent(player):\n if player == 1:\n return 0\n elif player == 0:\n return 1\n else:\n print(\"Oooooooooooooooops\")\n\nclass BoardState:\n \"\"\"Holds one state of the Mancala board.\"\"\"\n def __init__(self, to_move=None, utility=None, board=None, moves=None):\n if ((to_move == 0) or (to_move == 1)): # assume if to_move is not None, then neither are the rest\n self.to_move = to_move\n self._utility = utility\n self._board = board\n self._moves = moves\n else:\n self.create_initial_boardstate()\n\n def getPlayer(self):\n return self.to_move\n\n def create_initial_boardstate(self):\n \"\"\"Create an initial boardstate with the default start state.\"\"\"\n # this magic bit of code creates a list with 14 elements in it,\n # each of which is InitialPieceCount\n b = [InitialPieceCount] * 14\n # Now reset the 2 Mancala bins to be 0, instead of InitialPieceCount\n b[P0Mancala] = 0\n b[P1Mancala] = 0\n self._board = b\n\n self.to_move = P0 # P0 has the first move\n self._moves = self.calculate_legal_moves()\n self._utility = self.default_utility()\n\n def legal_p(self, move):\n \"A legal move must involve a position with pieces.\"\n if self._board[move] > 0:\n return True\n else:\n return None\n\n def legal_moves(self):\n \"Return a list of legal moves for player.\"\n return self._moves\n\n def calculate_legal_moves(self):\n \"\"\"Calculate the legal moves in the current BoardState.\"\"\"\n moves = []\n if self.to_move == P0:\n # the range of bins that P0 is allowed to choose from\n for poss in range(P0Start,P0Mancala):\n if self.legal_p(poss):\n moves.append(poss)\n else:\n # the range of bins that P1 is allowed to choose from\n for poss in range(P1Start,P1Mancala):\n if self.legal_p(poss):\n moves.append(poss)\n return moves\n\n\n def make_move(self, move):\n \"Return a new BoardState reflecting move made from given board state.\"\n newboard = BoardState(opponent(self.to_move), None, self._board[:], None)\n # Note where the opponent's mancala is located\n if self.to_move == P1:\n oppMancala = P0Mancala\n else:\n oppMancala = P1Mancala\n if move != None:\n # the number of pieces to be distributed around the board\n pieceCount = newboard._board[move]\n # set the move position's piece count to 0\n newboard._board[move] = 0\n\n # figure out the first bin that gets a piece distributed to it\n position = (move + 1) % 14\n # keep distributing pieces until you run out\n while pieceCount > 0:\n # don't distribute a piece to the opponent's mancala\n if position != oppMancala:\n newboard._board[position] += 1\n pieceCount -= 1\n position = (position + 1) % 14\n\n # if last move is into a previously empty bin owned by mover, then capture\n # the opponent's pieces from the opposite bin\n lastMove = (position - 1) % 14\n inRange = False\n if self.to_move == P0:\n if lastMove in range(P0Start, P0Mancala):\n inRange = True\n else:\n if lastMove in range(P1Start, P1Mancala):\n inRange = True\n if (newboard._board[lastMove] == 1) and inRange:\n # Magically, in our representation the \"opposite\" bin is always in the\n # array position 12 - Bin.\n oppBin = 12 - lastMove\n oppCount = newboard._board[oppBin]\n newboard._board[oppBin] = 0\n # print \"!!! 
captured \", oppCount, \" pieces from bin: \", oppBin\n if self.to_move == P0:\n newboard._board[P0Mancala] += oppCount\n else:\n newboard._board[P1Mancala] += oppCount\n\n # if last move is into the player's own Mancala, then they get another turn\n # Note that this is kind of weird from a Minimax search perspective.\n if (self.to_move == P0) and (lastMove == P0Mancala):\n newboard.to_move = self.to_move\n elif (self.to_move == P1) and (lastMove == P1Mancala):\n newboard.to_move = self.to_move\n\n newboard._moves = newboard.calculate_legal_moves()\n return newboard\n\n def PlayerPieceCount(self, player):\n \"Return a count of player's pieces where player is P0 or P1.\"\n total = 0\n if player == P0:\n for pos in range(P0Start,P0Mancala+1):\n total += self._board[pos]\n else:\n for pos in range(P1Start,P1Mancala+1):\n total += self._board[pos]\n return total\n\n def default_utility(self):\n \"Return count of player's pieces minus opponent's pieces.\"\n p0total = self.PlayerPieceCount(P0)\n p1total = self.PlayerPieceCount(P1)\n if self.to_move == P0:\n return p0total - p1total\n else:\n return p1total - p0total\n\n def default_utility2(self):\n \"Return player's Mancala count minus opponent's Mancala count.\"\n if self.to_move == P0:\n return self._board[P0Mancala] - self._board[P1Mancala]\n else:\n return self._board[P1Mancala] - self._board[P0Mancala]\n\nclass Mancala(Game):\n \"\"\"Play Mancala on a board with 6 bins plus a Mancala for each player.\"\"\"\n\n def __init__(self):\n self.current_state = BoardState()\n self.initial = self.current_state\n\n def display(self, boardstate):\n \"Print out the board in a readable way.\"\n print(' ', end=\"\")\n for top in range(12,7,-1):\n print('%2d' % boardstate._board[top],end=\",\")\n print('%2d' % boardstate._board[7],end=\"\\n\")\n # print(' ', end='\\n')\n print(boardstate._board[P1Mancala],end=\"\")\n for place in range(12,6,-1):\n print(' ',end=\"\")\n print(' ', boardstate._board[P0Mancala])\n\n print(' ',end=\"\")\n for bottom in range(P0Start,P0Mancala-1):\n print('%2d' % boardstate._board[bottom],end=\",\")\n print('%2d' % boardstate._board[P0Mancala-1],end=\"\\n\")\n # print()\n\n def legal_moves(self, boardstate):\n return boardstate.legal_moves()\n\n def make_move(self, move, boardstate):\n \"Return a new BoardState reflecting move made from given board state.\"\n newBoard = boardstate.make_move(move)\n return newBoard\n\n def calculate_utility(self, boardstate):\n return boardstate.default_utility()\n\n def utility(self, boardstate, player):\n \"This is where your utility function gets called.\"\n return player.calculate_utility(boardstate)\n\n","sub_path":"mancala.py","file_name":"mancala.py","file_ext":"py","file_size_in_byte":17488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"529746120","text":"from fizzbuzz import fizzbuzz\n\nif __name__ == \"__main__\":\n print(\"Welcome to Fizzbuzz! 
\")\n userInput = input(\"Enter the number to fizz buzz too: \")\n try:\n int(userInput)\n if int(userInput) < 0: # If it isn't a positive number, print error message\n print(\"Sorry, it must be a positive number\")\n for x in range(0, int(userInput)):\n print(fizzbuzz.fizzbuzz(x))\n except ValueError:\n print(\"Please Enter a whole number\")\n","sub_path":"fizzbuzz/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"588890900","text":"class A():\n def __str__(self):\n return 'Anna'\n\na = A()\nprint(a)\n\nclass B():\n def __repr__(self):\n return 'Calvin'\nb = B()\nprint(b)\n\nimport time as t\nclass MiTimer():\n def start(self):\n self.start = t.localtime()\n print(\"start...\")\n def stop(self):\n self.stop = t.localtime()\n self._calc()\n print(\"end...\")\n def _calc(self):\n self.lasted = []\n self.prompt = 'total : '\n for index in range(6):\n self.lasted.append(self.stop[index]-self.start[index])\n self.prompt += str(self.lasted[index])\n print(self.prompt)\n\n#t1 = MiTimer()\n#t1.start()\n#t1.stop()\n","sub_path":"Python/init/f009/magic4.py","file_name":"magic4.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"380702438","text":"#!/usr/bin/env python3\nimport socket\nip_port = ('127.0.0.1', 9999)\nwhile True:\n sk = socket.socket()\n sk.connect(ip_port)\n text = str(sk.recv(1024), encoding='utf-8')\n print(text)\n choose = input('请输入号码')\n sk.sendall(bytes(choose, encoding='utf-8'))\n textb = str(sk.recv(1024), encoding='utf-8')\n print(textb)\n sk.close()","sub_path":"PycharmProjects/learn/day_10/scokerserver/s.py","file_name":"s.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"533283164","text":"\n\nfrom xai.brain.wordbase.nouns._raincoat import _RAINCOAT\n\n#calss header\nclass _RAINCOATS(_RAINCOAT, ):\n\tdef __init__(self,): \n\t\t_RAINCOAT.__init__(self)\n\t\tself.name = \"RAINCOATS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"raincoat\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_raincoats.py","file_name":"_raincoats.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"436968590","text":"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.decomposition import PCA\r\n\r\npath = \".\\\\Dataset\\\\\"\r\n\r\nfilename = \"Company.NS_Features\"\r\ndf = pd.read_csv(path + filename + \".csv\")\r\n\r\nfeatures = df.columns[1:-1]\r\nx = df.loc[:, features].values\r\ny = df.loc[:,['Target']].values\r\n\r\nx = StandardScaler().fit_transform(x)\r\n\r\npca = PCA(n_components=3)\r\nprincipalComponents = pca.fit_transform(x)\r\n\r\neigenvalues = pca.explained_variance_\r\nprint(eigenvalues)\r\n\r\nprincipalDf = pd.DataFrame(data = principalComponents, columns = ['PC1', 'PC2', 'PC3'])\r\n\r\nfinalDf = pd.concat([principalDf, df[['Target']]], axis = 1)\r\n\r\npd.DataFrame.to_csv(finalDf,\"PCA.csv\", index=False)","sub_path":"PCA.py","file_name":"PCA.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"208412754","text":"from django import forms\nfrom django_summernote.widgets import 
SummernoteWidget\n\nfrom . import models\n\nclass FanficForm(forms.ModelForm):\n class Meta:\n exclude = ('public', 'author', 'date', 'fandom')\n model = models.Fanfic\n widgets = {\n 'text': SummernoteWidget(),\n }","sub_path":"fanfic_pl/fanfics/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"659193","text":"# Train multiple images per person\r\n# Find and recognize faces in an image using an SVC with scikit-learn\r\n\r\n\"\"\"\r\nStructure:\r\n <test_image>.jpg\r\n <train_dir>/\r\n <person_1>/\r\n <person_1_face-1>.jpg\r\n <person_1_face-2>.jpg\r\n .\r\n .\r\n <person_1_face-n>.jpg\r\n <person_2>/\r\n <person_2_face-1>.jpg\r\n <person_2_face-2>.jpg\r\n .\r\n .\r\n <person_2_face-n>.jpg\r\n .\r\n .\r\n <person_n>/\r\n <person_n_face-1>.jpg\r\n <person_n_face-2>.jpg\r\n .\r\n .\r\n <person_n_face-n>.jpg\r\n\"\"\"\r\n\r\nimport face_recognition\r\nfrom sklearn import svm\r\nfrom sklearn.externals import joblib\r\nimport os\r\nimport numpy as np\r\n\r\n# Training the SVC classifier\r\n\r\n# The training data would be all the face encodings from all the known images and the labels are their names\r\nencodings = []\r\nnames = []\r\n\r\n# Training directory\r\ntrain_dir = os.listdir('D:\\\\J\\\\Intania\\\\work\\\\IOT\\\\Final\\\\Belly-Blue\\\\face_recognition\\\\train_dir')\r\n\r\n# Loop through each person in the training directory\r\nfor person in train_dir:\r\n pix = os.listdir(\"D:\\\\J\\\\Intania\\\\work\\\\IOT\\\\Final\\\\Belly-Blue\\\\face_recognition\\\\train_dir\\\\\" + person)\r\n\r\n # Loop through each training image for the current person\r\n for person_img in pix:\r\n # Get the face encodings for the face in each image file\r\n face = face_recognition.load_image_file(\"D:\\\\J\\\\Intania\\\\work\\\\IOT\\\\Final\\\\Belly-Blue\\\\face_recognition\\\\train_dir\\\\\" + person + \"\\\\\" + person_img)\r\n face_bounding_boxes = face_recognition.face_locations(face)\r\n\r\n #If training image contains no face or more than one face, print an error message and exit\r\n if len(face_bounding_boxes) != 1:\r\n print(person + \"/\" + person_img + \" contains no face or more than one face and can't be used for training.\")\r\n exit()\r\n else:\r\n face_enc = face_recognition.face_encodings(face)[0]\r\n # Add face encoding for current image with corresponding label (name) to the training data\r\n encodings.append(face_enc)\r\n names.append(person)\r\n\r\n# Create and train the SVC classifier\r\nclf = svm.SVC(gamma='scale',verbose=1,probability=True)\r\nclf.fit(encodings,names)\r\n\r\n#save model\r\nfilename = 'finalized_model.sav'\r\njoblib.dump(clf, filename)\r\n\r\n\r\n#====Load model and predict\r\n\r\n# Load the test image with unknown faces into a numpy array\r\ntest_image = face_recognition.load_image_file('test.jpg')\r\n\r\n# Find all the faces in the test image using the default HOG-based model\r\nface_locations = face_recognition.face_locations(test_image)\r\nno = len(face_locations)\r\nprint(\"Number of faces detected: \", no)\r\n\r\n# Predict all the faces in the test image using the trained classifier\r\nprint(\"Found:\")\r\n\r\n#load model\r\nloaded_model = joblib.load(filename)\r\n\r\nfor i in range(no):\r\n test_image_enc = face_recognition.face_encodings(test_image)[i]\r\n #name = loaded_model.predict([test_image_enc])\r\n class_probabilities = loaded_model.predict_proba([test_image_enc])\r\n print(class_probabilities)\r\n print(loaded_model.classes_)\r\n if np.max(class_probabilities[0])>=0.7:\r\n index = np.argmax(class_probabilities[0])\r\n print(loaded_model.classes_[index])\r\n else:\r\n print(\"Unknown\")\r\n #print(str(*name) + 
str(class_probabilities))","sub_path":"svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"494702344","text":"# Django\nfrom django.core.exceptions import (ObjectDoesNotExist, PermissionDenied)\nfrom django.shortcuts import (get_object_or_404, render, redirect)\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.forms import SetPasswordForm\nfrom django.utils.translation import ugettext as _\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import (Q, Count)\nfrom django.views import generic\nfrom django.contrib import auth\nfrom django.http import Http404\nfrom django.db.models import F\n\n# Django braces\nfrom braces import views\n\n# Python Lib\nimport json\n\n# Custom\nfrom . import models\nfrom . import forms\nfrom . import utils\n\n\n# MIXIN\n\nclass AjaxMixin(views.JSONResponseMixin, views.AjaxResponseMixin, generic.View):\n pass\n\n\nclass LoginRequiredMixin(views.LoginRequiredMixin):\n login_url = 'index'\n\n\nclass AuthRedirectMixin(object):\n @classmethod\n def get_auth_redirect_url(cls):\n return reverse('user-dashboard-myprojects')\n\n def get_authenticated_redirect_url(self):\n return reverse('user-dashboard-myprojects')\n\n\nclass ProjectLeaderRequiredMixin(object):\n def dispatch(self, request, *args, **kwargs):\n if not request.user.is_leader(int(kwargs['pk'])):\n raise PermissionDenied\n return super(ProjectLeaderRequiredMixin, self).dispatch(request, *args, **kwargs)\n\n\nclass ProjectMemberRequiredMixin(object):\n def dispatch(self, request, *args, **kwargs):\n if not request.user.is_member(int(kwargs['pk'])):\n raise PermissionDenied\n return super(ProjectMemberRequiredMixin, self).dispatch(request, *args, **kwargs)\n\n\nclass TaskMakerRequiredMixin(object):\n def dispatch(self, request, *args, **kwargs):\n task = get_object_or_404(models.Task, pk=kwargs['task_pk'])\n if request.user != task.assigned_to and not request.user.is_leader(int(kwargs['pk'])):\n raise PermissionDenied\n return super(TaskMakerRequiredMixin, self).dispatch(request, *args, **kwargs)\n\n\n# AUTH\n\nclass IndexView(AuthRedirectMixin, views.AnonymousRequiredMixin, generic.TemplateView):\n template_name = 'index.jinja2'\n\n\nclass SignUpView(AuthRedirectMixin, views.AnonymousRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n form = forms.SignUpForm(data=request.POST)\n if form.is_valid():\n user = form.save()\n user = auth.authenticate(username=user.email, password=form.cleaned_data['password1'])\n auth.login(request, user)\n return self.render_json_response({\n 'status': 'OK',\n 'next_url': AuthRedirectMixin.get_auth_redirect_url()\n })\n return self.render_json_response({\n 'status': 'Error',\n 'data': form.errors.as_json()\n })\n\n\nclass SignInView(AuthRedirectMixin, views.AnonymousRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n form = forms.SignInForm(data=request.POST)\n if form.is_valid():\n user = form.get_user()\n auth.login(request, user)\n return self.render_json_response({\n 'status': 'OK',\n 'next_url': AuthRedirectMixin.get_auth_redirect_url()\n })\n return self.render_json_response({\n 'status': 'Error',\n 'data': form.errors.as_json()\n })\n\n\nclass LogoutView(LoginRequiredMixin, generic.View):\n def get(self, request, *args, **kwargs):\n auth.logout(request)\n return redirect(reverse('index'))\n\n\nclass ResetPasswordView(AuthRedirectMixin, 
views.AnonymousRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n form = forms.ResetPasswordForm(data=request.POST)\n if form.is_valid():\n email = form.cleaned_data['email']\n user = models.User.objects.get(email=email)\n user.reset_password()\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Instruction has been sent to your email!')\n })\n return self.render_json_response({\n 'status': 'Error',\n 'data': form.errors.as_json()\n })\n\n\nclass SetPasswordView(AuthRedirectMixin, views.AnonymousRequiredMixin, AjaxMixin):\n def get(self, request, reset_key):\n user = models.User.objects.check_reset_password_key(reset_key)\n if user:\n form = SetPasswordForm(user=user)\n return render(request=request,\n template_name='set_new_password.jinja2',\n context={\n 'form': form,\n 'key': reset_key\n })\n raise Http404\n\n def post_ajax(self, request, *args, **kwargs):\n user = models.User.objects.check_reset_password_key(kwargs['reset_key'])\n form = SetPasswordForm(user=user, data=request.POST)\n if form.is_valid():\n user = form.save()\n user.make_password_reseted()\n return self.render_json_response({\n 'status': 'OK',\n 'next_url': reverse('index'),\n 'message': _('New password has been updated!')\n })\n return self.render_json_response({\n 'status': 'Error',\n 'data': form.errors.as_json()\n })\n\n\n# ACTION\n\nclass UserDashboardActionListView(LoginRequiredMixin, generic.View):\n def get(self, request, *args, **kwargs):\n actions = models.Action.objects.filter(user=request.user)\n requests = actions.filter(type=models.Action.TYPE_REQUEST)\n invites = actions.filter(type=models.Action.TYPE_INVITE)\n utils.reset_user_actions_cache(request.user.pk)\n return render(request=request,\n template_name='user_dashboard/actions.jinja2',\n context={\n 'invites': invites,\n 'requests': requests\n })\n\n\nclass ProjectDashboardActionListView(LoginRequiredMixin, ProjectLeaderRequiredMixin, generic.View):\n def get(self, request, *args, **kwargs):\n actions = models.Action.objects.filter(project_id=kwargs['pk'])\n requests = actions.filter(type=models.Action.TYPE_REQUEST)\n invites = actions.filter(type=models.Action.TYPE_INVITE)\n utils.reset_project_actions_cache(kwargs['pk'])\n return render(request=request,\n template_name='project_dashboard/actions.jinja2',\n context={\n 'invites': invites,\n 'requests': requests,\n 'pk': kwargs['pk']\n })\n\n\nclass RequestCreateView(LoginRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n form = forms.ActionForm({\n 'project': request.POST.get('data', None),\n 'user': request.user.pk,\n 'type': models.Action.TYPE_REQUEST\n })\n if form.is_valid():\n request = form.save()\n utils.increase_project_actions_cache(request.project.pk)\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Request has been added!'),\n })\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\nclass InviteCreateView(LoginRequiredMixin, ProjectLeaderRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n form = forms.ActionForm({\n 'project': kwargs['pk'],\n 'user': request.POST.get('data', None),\n 'type': models.Action.TYPE_INVITE\n })\n if form.is_valid():\n invite = form.save()\n utils.increase_user_actions_cache(invite.user.pk)\n # TODO: send email to user or notify him\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Invite has been added!')\n })\n return self.render_json_response({\n 'status': 
'Error',\n 'message': _('Something wrong, try later')\n })\n\n\nclass InviteDeleteView(LoginRequiredMixin, ProjectLeaderRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n user_pk = request.POST.get('data', None)\n if user_pk:\n count, task = models.Action.objects.filter(project_id=kwargs['pk'], user=user_pk).delete()\n if count:\n utils.decrease_user_actions_cache(user_pk)\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Invite has been removed!')\n })\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\nclass RequestDeleteView(LoginRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n request_pk = request.POST.get('data', None)\n if request_pk:\n requests = models.Action.objects.filter(pk=request_pk, user=request.user)\n if requests.count():\n utils.decrease_project_actions_cache(requests[0].project.pk)\n requests.delete()\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Request has been deleted!')\n })\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\nclass InviteAcceptView(LoginRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n invite_pk = request.POST.get('data', None)\n if invite_pk:\n try:\n invite = models.Action.objects.get(pk=invite_pk, user=request.user)\n models.Membership.objects.create(user=request.user, project=invite.project)\n invite.delete()\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Invite has been accepted!')\n })\n except ObjectDoesNotExist:\n pass\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\nclass RequestAcceptView(LoginRequiredMixin, ProjectLeaderRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n user_pk = request.POST.get('data', None)\n if user_pk:\n try:\n project = models.Project.objects.get(pk=kwargs['pk'])\n user = models.User.objects.get(pk=user_pk)\n models.Action.objects.filter(project=project, user=user).delete()\n models.Membership.objects.create(user=user, project=project)\n # TODO: send email to user or notify him\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('User has been added to project!')\n })\n except ObjectDoesNotExist:\n pass\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\nclass InviteRevokeView(LoginRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n invite_pk = request.POST.get('data', None)\n if invite_pk:\n count, task = models.Action.objects.filter(pk=invite_pk, user=request.user).delete()\n if count:\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Invite has been revoked!')\n })\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\nclass RequestRevokeView(LoginRequiredMixin, ProjectLeaderRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n user_pk = request.POST.get('data', None)\n if user_pk:\n count, task = models.Action.objects.filter(project_id=kwargs['pk'], user_id=user_pk).delete()\n if count:\n # TODO: send email to user or notify him\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Request has been revoked!')\n })\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\n# 
PROJECT\n\nclass ProjectListView(LoginRequiredMixin, generic.ListView):\n paginate_by = 20\n context_object_name = 'projects'\n template_name = 'user_dashboard/projects.jinja2'\n\n def get_queryset(self):\n action_projects = models.Action.objects.filter(user=self.request.user).values_list('project_id', flat=True)\n projects_work = self.request.user.projects.values_list('id', flat=True)\n exclude_projects = list(action_projects) + list(projects_work)\n projects = models.Project.objects.filter(is_private=False).exclude(pk__in=exclude_projects)\n query = self.request.GET.get('q', None)\n if query:\n projects = projects.filter(name__icontains=query)\n return projects\n\n\nclass MyProjectListView(LoginRequiredMixin, generic.View):\n def get(self, request, *args, **kwargs):\n membership = models.Membership.objects.filter(user=request.user).select_related('project')\n leader_membership = membership.filter(invite_reason=models.Membership.TYPE_LEADER)\n work_membership = membership.filter(invite_reason=models.Membership.TYPE_MEMBER)\n return render(request=request,\n template_name='user_dashboard/my_projects.jinja2',\n context={\n 'membership_leader': leader_membership,\n 'membership_worker': work_membership,\n 'form': forms.ProjectForm()\n })\n\n\nclass ProjectDeleteView(LoginRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n membership_pk = request.POST.get('data', None)\n if membership_pk:\n try:\n membership = models.Membership.objects.get(pk=membership_pk, user=request.user,\n invite_reason=models.Membership.TYPE_LEADER)\n project = membership.project\n leaders = models.Membership.objects.filter(project=project,\n invite_reason=models.Membership.TYPE_LEADER)\n if leaders.count() == 1:\n project.delete()\n membership.delete()\n # TODO: delete all cache\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Project has been deleted!')\n })\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('You cannot delete a project that has more than one leader!')\n })\n except ObjectDoesNotExist:\n pass\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\nclass ProjectLeaveView(LoginRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n project_pk = request.POST.get('data', None)\n if project_pk:\n try:\n membership = models.Membership.objects.get(project_id=project_pk, user=request.user)\n leaders = models.Membership.objects.filter(project_id=project_pk,\n invite_reason=models.Membership.TYPE_LEADER)\n if membership.invite_reason == models.Membership.TYPE_LEADER and leaders.count() == 1:\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('You cannot leave a project that has only one leader! 
Delete this project')\n })\n utils.delete_my_tasks_cache(request.user.pk, project_pk)\n membership.delete()\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('You have been left from project!'),\n 'next_url': reverse('index')\n })\n except ObjectDoesNotExist:\n pass\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\nclass ProjectCreateView(LoginRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n form = forms.ProjectForm(data=request.POST)\n if form.is_valid():\n project = form.save()\n models.Membership.objects.create(user=request.user, project=project,\n invite_reason=models.Membership.TYPE_LEADER)\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Project has been created!')\n })\n return self.render_json_response({\n 'status': 'Error',\n 'data': form.errors.as_json()\n })\n\n\nclass ProjectUpdateView(LoginRequiredMixin, ProjectLeaderRequiredMixin, AjaxMixin):\n def get(self, request, *args, **kwargs):\n project = get_object_or_404(models.Project, pk=kwargs['pk'])\n return render(request=request,\n template_name='project_dashboard/project_update.jinja2',\n context={\n 'form': forms.ProjectForm(instance=project),\n 'pk': kwargs['pk']\n })\n\n def post_ajax(self, request, *args, **kwargs):\n try:\n project = models.Project.objects.get(pk=kwargs['pk'])\n form = forms.ProjectForm(data=request.POST, instance=project)\n if form.is_valid():\n form.save()\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Project has been updated!')\n })\n return self.render_json_response({\n 'status': 'Error',\n 'data': form.errors.as_json()\n })\n except ObjectDoesNotExist:\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\n# TEAM\n\nclass UserListView(LoginRequiredMixin, ProjectMemberRequiredMixin, generic.ListView):\n paginate_by = 20\n context_object_name = 'users'\n template_name = 'project_dashboard/users.jinja2'\n\n def get_queryset(self):\n project = get_object_or_404(models.Project, pk=self.kwargs['pk'])\n user_actions = models.Action.objects.filter(project=project).values_list('user_id', flat=True)\n project_members = project.members.values_list('id', flat=True)\n exclude_users = list(user_actions) + list(project_members)\n users = models.User.objects.exclude(pk__in=exclude_users)\n query = self.request.GET.get('q', None)\n if query:\n users = users.filter(Q(email__icontains=query) |\n Q(first_name__icontains=query) |\n Q(last_name__icontains=query) |\n Q(second_name__icontains=query))\n return users\n\n def get_context_data(self, **kwargs):\n context = super(UserListView, self).get_context_data(**kwargs)\n context['pk'] = self.kwargs['pk']\n return context\n\n\nclass UserUpdateView(LoginRequiredMixin, AjaxMixin):\n def get(self, request, *args, **kwargs):\n return render(request=request,\n template_name='user_dashboard/user_update.jinja2',\n context={\n 'form': forms.UpdateForm(instance=request.user)\n })\n\n def post_ajax(self, request, *args, **kwargs):\n form = forms.UpdateForm(data=request.POST, instance=request.user)\n if form.is_valid():\n user = form.save()\n update_session_auth_hash(request, user)\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Your profile has been updated!')\n })\n return self.render_json_response({\n 'status': 'Error',\n 'data': form.errors.as_json()\n })\n\n\nclass TeamListView(LoginRequiredMixin, ProjectMemberRequiredMixin, generic.View):\n 
def get(self, request, *args, **kwargs):\n project = get_object_or_404(models.Project, pk=kwargs['pk'])\n membership = models.Membership.objects.filter(project=project).select_related('user')\n leader_membership = membership.filter(invite_reason=models.Membership.TYPE_LEADER)\n work_membership = membership.filter(invite_reason=models.Membership.TYPE_MEMBER)\n return render(request=request,\n template_name='project_dashboard/team.jinja2',\n context={\n 'leader_membership': leader_membership,\n 'work_membership': work_membership,\n 'pk': kwargs['pk']\n })\n\n\nclass UserRemoveFromProjectView(LoginRequiredMixin, ProjectLeaderRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n user_pk = request.POST.get('data', None)\n if user_pk:\n try:\n memberships = models.Membership.objects.filter(project_id=kwargs['pk'], user_id=user_pk,\n invite_reason=models.Membership.TYPE_MEMBER)\n count, task = memberships.delete()\n if count:\n # TODO: send email to user or notify him\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('User has been removed from project!')\n })\n except ObjectDoesNotExist:\n pass\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\nclass UserMakeLeaderView(LoginRequiredMixin, ProjectLeaderRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n user_pk = request.POST.get('data', None)\n if user_pk:\n try:\n memberships = models.Membership.objects.filter(project_id=kwargs['pk'], user_id=user_pk,\n invite_reason=models.Membership.TYPE_MEMBER)\n count = memberships.update(invite_reason=models.Membership.TYPE_LEADER)\n if count:\n # TODO: send email to user or notify him\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('User has been added to leaders!')\n })\n except ObjectDoesNotExist:\n pass\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\n# TASK\n\nclass TaskListView(LoginRequiredMixin, ProjectMemberRequiredMixin, generic.View):\n def get(self, request, *args, **kwargs):\n project = get_object_or_404(models.Project, pk=kwargs['pk'])\n iterations = models.Iteration.objects.filter(project=project).annotate(count_tasks=Count('tasks'))\n return render(request=request,\n template_name='project_dashboard/tasks.jinja2',\n context={\n 'iterations': iterations,\n 'form': forms.IterationForm(),\n 'pk': kwargs['pk']\n })\n\n\nclass TaskDetailView(LoginRequiredMixin, ProjectMemberRequiredMixin, generic.View):\n def get(self, request, *args, **kwargs):\n task = get_object_or_404(models.Task, pk=kwargs['task_pk'], iteration__project_id=kwargs['pk'])\n return render(request=request,\n template_name='project_dashboard/task.jinja2',\n context={\n 'task': task,\n 'pk': kwargs['pk']\n })\n\n\nclass MyTaskListView(LoginRequiredMixin, ProjectMemberRequiredMixin, generic.View):\n def get(self, request, *args, **kwargs):\n tasks_assigned_to_me = models.Task.objects.filter(assigned_to=request.user, iteration__project_id=kwargs['pk'])\\\n .exclude(status=models.Task.STATUS_DONE)\n tasks_assigned_from_me = models.Task.objects.filter(assigned_from=request.user, iteration__project_id=kwargs['pk'])\n return render(request=request,\n template_name='project_dashboard/my_tasks.jinja2',\n context={\n 'tasks_assigned_to_me': tasks_assigned_to_me,\n 'tasks_assigned_from_me': tasks_assigned_from_me,\n 'pk': kwargs['pk']\n })\n\n\nclass TaskCreateView(LoginRequiredMixin, ProjectLeaderRequiredMixin, AjaxMixin):\n 
def get(self, request, *args, **kwargs):\n project = get_object_or_404(models.Project, pk=kwargs['pk'])\n return render(request=request,\n template_name='project_dashboard/task_create.jinja2',\n context={\n 'form': forms.TaskForm(project=project),\n 'pk': kwargs['pk']\n })\n\n def post_ajax(self, request, *args, **kwargs):\n try:\n project = models.Project.objects.get(pk=kwargs['pk'])\n form = forms.TaskForm(data=self.request.POST, project=project)\n if form.is_valid():\n task = form.save(commit=False)\n task.assigned_from = self.request.user\n task.save()\n # TODO: send email to user or notify him\n utils.increase_my_tasks_cache(task.assigned_to.pk, kwargs['pk'])\n if task.iteration.is_closed():\n task.iteration.make_active()\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Task has been added!'),\n 'next_url': reverse('project-dashboard-tasks', kwargs={'pk': project.pk})\n })\n return self.render_json_response({\n 'status': 'Error',\n 'data': form.errors.as_json()\n })\n except ObjectDoesNotExist:\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\nclass TaskUpdateView(LoginRequiredMixin, ProjectLeaderRequiredMixin, AjaxMixin):\n def get(self, request, *args, **kwargs):\n project = get_object_or_404(models.Project, pk=kwargs['pk'])\n task = get_object_or_404(models.Task, pk=kwargs['task_pk'])\n return render(request=request,\n template_name='project_dashboard/task_update.jinja2',\n context={\n 'form': forms.TaskForm(project=project, instance=task),\n 'pk': kwargs['pk'],\n 'task_pk': kwargs['task_pk']\n })\n\n def post_ajax(self, request, *args, **kwargs):\n try:\n project = models.Project.objects.get(pk=kwargs['pk'])\n task = models.Task.objects.get(pk=kwargs['task_pk'])\n form = forms.TaskForm(data=self.request.POST, project=project, instance=task)\n if form.is_valid():\n task = form.save()\n if task.iteration.can_close():\n task.iteration.close()\n elif task.iteration.is_closed():\n task.iteration.make_active()\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Task has been updated!'),\n 'next_url': reverse('project-dashboard-tasks', kwargs={'pk': project.pk})\n })\n return self.render_json_response({\n 'status': 'Error',\n 'data': form.errors.as_json()\n })\n except ObjectDoesNotExist:\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\nclass TaskDeleteView(LoginRequiredMixin, ProjectLeaderRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n task_pk = request.POST.get('data', None)\n if task_pk:\n tasks = models.Task.objects.filter(pk=task_pk, iteration__project_id=kwargs['pk'])\n if tasks.count():\n utils.decrease_my_tasks_cache(tasks[0].assigned_to.pk, kwargs['pk'])\n tasks.delete()\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Task has been removed!')\n })\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\nclass TaskStartView(LoginRequiredMixin, TaskMakerRequiredMixin, generic.View):\n def post(self, request, *args, **kwargs):\n task = get_object_or_404(models.Task, pk=kwargs['task_pk'], iteration__project_id=kwargs['pk'])\n task.status = models.Task.STATUS_IN_PROGRESS\n task.save()\n return render(request=request,\n template_name='project_dashboard/task.jinja2',\n context={\n 'task': task,\n 'pk': kwargs['pk']\n })\n\n\nclass TaskCloseView(LoginRequiredMixin, TaskMakerRequiredMixin, generic.View):\n def 
post(self, request, *args, **kwargs):\n task = get_object_or_404(models.Task, pk=kwargs['task_pk'], iteration__project_id=kwargs['pk'])\n # TODO: if task is testable set STATUS_TESTING and make request to CI service\n task.status = models.Task.STATUS_DONE\n utils.decrease_my_tasks_cache(task.assigned_to.pk, kwargs['pk'])\n task.save()\n if task.iteration.can_close():\n task.iteration.close()\n return render(request=request,\n template_name='project_dashboard/task.jinja2',\n context={\n 'task': task,\n 'pk': kwargs['pk']\n })\n\n\n# ITERATION\n\nclass IterationCreateView(LoginRequiredMixin, ProjectLeaderRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n try:\n project = models.Project.objects.get(pk=kwargs['pk'])\n form = forms.IterationForm(data=request.POST)\n if form.is_valid():\n iteration = form.save(commit=False)\n iteration.project = project\n iteration.save()\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Iteration has been created!')\n })\n return self.render_json_response({\n 'status': 'Error',\n 'data': form.errors.as_json()\n })\n except ObjectDoesNotExist:\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\nclass IterationUpdateView(LoginRequiredMixin, ProjectLeaderRequiredMixin, AjaxMixin):\n def get(self, request, *args, **kwargs):\n iteration = get_object_or_404(models.Iteration, pk=kwargs['iteration_pk'])\n return render(request=request,\n template_name='project_dashboard/iteration_update.jinja2',\n context={\n 'form': forms.IterationForm(instance=iteration),\n 'pk': kwargs['pk'],\n 'iteration_pk': kwargs['iteration_pk']\n })\n\n def post_ajax(self, request, *args, **kwargs):\n try:\n iteration = models.Iteration.objects.get(pk=kwargs['iteration_pk'])\n form = forms.IterationForm(data=self.request.POST, instance=iteration)\n if form.is_valid():\n form.save()\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Iteration has been updated!'),\n 'next_url': reverse('project-dashboard-tasks', kwargs={'pk': kwargs['pk']})\n })\n return self.render_json_response({\n 'status': 'Error',\n 'data': form.errors.as_json()\n })\n except ObjectDoesNotExist:\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\nclass IterationDeleteView(LoginRequiredMixin, ProjectLeaderRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n iteration_pk = request.POST.get('data', None)\n if iteration_pk:\n count, task = models.Iteration.objects.filter(pk=iteration_pk, project_id=kwargs['pk']).delete()\n if count:\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('Iteration has been removed!')\n })\n return self.render_json_response({\n 'status': 'Error',\n 'message': _('Something wrong, try later')\n })\n\n\n# FILE\n\nclass UploadFile(LoginRequiredMixin, ProjectLeaderRequiredMixin, AjaxMixin):\n def post_ajax(self, request, *args, **kwargs):\n try:\n project = models.Project.objects.get(pk=kwargs['pk'])\n form = forms.FileForm(request.POST, request.FILES)\n if form.is_valid():\n file = form.save(commit=False)\n file.project = project\n file.user = request.user\n file.save()\n return self.render_json_response({\n 'status': 'OK',\n 'message': _('File has been uploaded!'),\n 'data': file.get_url()\n })\n return self.render_json_response({\n 'status': 'Error',\n 'data': form.errors.as_json()\n })\n except ObjectDoesNotExist:\n return self.render_json_response({\n 'status': 
'Error',\n 'message': _('Something wrong, try later')\n })\n\n\nclass CalculateStatistics(LoginRequiredMixin, ProjectLeaderRequiredMixin, generic.View):\n def get(self, request, *args, **kwargs):\n project = get_object_or_404(models.Project, pk=kwargs['pk'])\n members = project.members.all()\n\n data = []\n for user in members:\n user_tasks = user.tasks_assigned_to_me.filter(iteration__project_id=kwargs['pk'])\n item = {\n 'user': user,\n 'tasks': user_tasks,\n 'completed': user_tasks.filter(status=models.Task.STATUS_DONE).count(),\n 'current': user_tasks.exclude(status=models.Task.STATUS_DONE).count(),\n 'deadlined': user_tasks.filter(end__gte=F('deadline')).count(),\n }\n item['rating'] = item['completed'] - item['deadlined']\n data.append(item)\n\n tasks = models.Task.objects.filter(iteration__project_id=kwargs['pk'])\n tasks_status_raw = tasks.values('status').annotate(count=Count('status'))\n tasks_status = {'status': [], 'count': []}\n for item in tasks_status_raw:\n tasks_status['count'].append(item['count'])\n tasks_status['status'].append(models.Task.get_status_value(item['status']))\n\n tasks_type_raw = tasks.values('type').annotate(count=Count('type'))\n tasks_type = {'type': [], 'count': []}\n for item in tasks_type_raw:\n tasks_type['count'].append(item['count'])\n tasks_type['type'].append(models.Task.get_type_value(item['type']))\n\n tasks_close_date = {\n 'status': [_('Deadline'), _('Not Deadline')],\n 'values': [0, 0]\n }\n for task in tasks:\n if task.is_done():\n if task.is_deadlined():\n tasks_close_date['values'][0] += 1\n else:\n tasks_close_date['values'][1] += 1\n\n iterations = models.Iteration.objects.filter(project_id=kwargs['pk'])\n iterations_status = {\n 'status': [_('Active'), _('Closed')],\n 'values': [iterations.filter(status=models.Iteration.STATUS_ACTIVE).count(),\n iterations.filter(status=models.Iteration.STATUS_CLOSED).count()]\n }\n\n return render(request=request,\n template_name='project_dashboard/stats.jinja2',\n context={\n 'iterations_status': json.dumps(iterations_status),\n 'tasks_close_date': json.dumps(tasks_close_date),\n 'tasks_status': json.dumps(tasks_status),\n 'tasks_type': json.dumps(tasks_type),\n 'users': data,\n 'pk': kwargs['pk'],\n })\n\n\nclass GanttDiagram(LoginRequiredMixin, ProjectLeaderRequiredMixin, generic.View):\n def get(self, request, *args, **kwargs):\n gantt_data = {\n 'data': [],\n 'links': []\n }\n counter_tasks, counter_links = 1, 1\n\n iterations = models.Iteration.objects.filter(project_id=kwargs['pk'])\n for iteration in iterations:\n gantt_data['data'].append({\n 'id': counter_tasks,\n 'text': iteration.name,\n 'start_date': iteration.creation_date.date().isoformat(),\n 'duration': iteration.get_duration(),\n 'progress': iteration.get_progress(),\n 'open': iteration.is_closed()\n })\n tasks = iteration.tasks.order_by('issue_date')\n iteration_id = counter_tasks\n counter_tasks += 1\n for task in tasks:\n gantt_data['data'].append({\n 'id': counter_tasks,\n 'text': task.name,\n 'start_date': task.issue_date.isoformat(),\n 'duration': task.get_duration(),\n 'progress': task.get_progress(),\n 'open': task.is_done(),\n 'parent': iteration_id,\n 'level': task.priority\n })\n gantt_data['links'].append({\n 'id': counter_links,\n 'source': iteration_id,\n 'target': counter_tasks,\n 'type': '1'\n })\n counter_tasks += 1\n counter_links += 1\n\n return render(request=request,\n template_name='project_dashboard/gantt.jinja2',\n context={\n 'tasks': json.dumps(gantt_data),\n 'pk': kwargs['pk']\n 
})\n","sub_path":"lastareas/tasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":39819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"556238508","text":"from __future__ import unicode_literals\n\nimport codecs\nimport os\nimport six\nimport shutil\nimport tempfile\nfrom unittest import TestCase\n\nfrom click.testing import CliRunner\n\nfrom taxi.backends import BaseBackend, PushEntryFailed, PushEntriesFailed\nfrom taxi.backends.registry import backends_registry\nfrom taxi.commands.base import cli\nfrom taxi.projects import ProjectsDb\nfrom taxi.utils.file import expand_date\n\n\nclass TestBackendEntryPoint(object):\n \"\"\"\n Dedicated backend for tests. Entries with the alias `fail` will fail when\n trying to push them.\n \"\"\"\n class TestBackend(BaseBackend):\n def __init__(self, *args, **kwargs):\n super(TestBackendEntryPoint.TestBackend, self).__init__(\n *args, **kwargs\n )\n self.entries = []\n\n def push_entry(self, date, entry):\n self.entries.append(entry)\n\n if entry.alias == 'fail':\n raise PushEntryFailed()\n\n def post_push_entries(self):\n failed_entries = {}\n\n for entry in self.entries:\n if entry.alias == 'post_push_fail':\n failed_entries[entry] = 'foobar'\n\n if failed_entries:\n raise PushEntriesFailed(entries=failed_entries)\n\n def load(self):\n return self.TestBackend\n\n\nclass CommandTestCase(TestCase):\n def setUp(self):\n _, self.config_file = tempfile.mkstemp()\n _, self.entries_file = tempfile.mkstemp()\n self.taxi_dir = tempfile.mkdtemp()\n # Keep the original entry points to restore them in tearDown\n self.backends_original_entry_points = backends_registry._entry_points\n\n # Hot swap the entry points from the backends registry with our own\n # test backend. This avoids having to register the test backend in the\n # setup.py file\n backends_registry._entry_points = {\n 'test': TestBackendEntryPoint()\n }\n\n # Create an empty projects db file\n projects_db_file = os.path.join(self.taxi_dir,\n ProjectsDb.PROJECTS_FILE)\n with open(projects_db_file, 'w') as f:\n f.close()\n\n self.default_config = {\n 'default': {\n 'date_format': '%d/%m/%Y',\n 'editor': '/bin/true',\n 'file': self.entries_file,\n 'use_colors': '0'\n },\n 'backends': {\n 'test': 'test:///',\n },\n 'test_aliases': {\n 'alias_1': '123/456',\n 'fail': '456/789',\n 'post_push_fail': '456/789',\n },\n }\n\n def tearDown(self):\n backends_registry._entry_points = self.backends_original_entry_points\n entries_file = expand_date(self.entries_file)\n\n os.remove(self.config_file)\n if os.path.exists(entries_file):\n os.remove(entries_file)\n shutil.rmtree(self.taxi_dir)\n\n def write_config(self, config):\n with open(self.config_file, 'w') as f:\n for (section, params) in six.iteritems(config):\n f.write(\"[%s]\\n\" % section)\n\n for (param, value) in six.iteritems(params):\n f.write(\"%s = %s\\n\" % (param, value))\n\n def write_entries(self, contents):\n with codecs.open(expand_date(self.entries_file), 'a', 'utf-8') as f:\n f.write(contents)\n\n def read_entries(self):\n with codecs.open(expand_date(self.entries_file), 'r', 'utf-8') as f:\n contents = f.read()\n\n return contents\n\n def run_command(self, command_name, args=None, config_options=None,\n input=None):\n \"\"\"\n Run the given taxi command with the given arguments and options. 
Before\n running the command, the configuration file is written with the given\n `config_options`, if any, or with the default config options.\n\n The output of the command is returned as a string.\n\n Args:\n command_name -- The name of the command to run\n args -- An optional list of arguments for the command\n config_options -- An optional options hash that will be used to\n write the config file before running the command\n \"\"\"\n if args is None:\n args = []\n\n if config_options is None:\n config_options = self.default_config\n else:\n config_options = dict(\n list(self.default_config.items()) +\n list(config_options.items())\n )\n self.write_config(config_options)\n\n args.insert(0, command_name)\n args.insert(0, '--taxi-dir=%s' % self.taxi_dir)\n args.insert(0, '--config=%s' % self.config_file)\n\n runner = CliRunner()\n result = runner.invoke(cli, args, input=input)\n\n return result.output\n","sub_path":"tests/commands/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"227724078","text":"import os \nimport numpy as np \nimport nrrd \nimport cv2 \nimport SimpleITK as sitk \nimport shutil \nimport nrrd \nimport csv \nimport logging \nimport glob \nimport imageio\nimport nibabel as nib \nfrom PIL import Image \nfrom skimage import measure, io \n\n\n# set logging information\nlogger = logging.getLogger(__name__)\nlogger.setLevel(level=logging.INFO)\nhandler = logging.StreamHandler()\nformatter = logging.Formatter('%(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\n# path = '/home/drs/Desktop/mvi_data/MVI'\npath = '/media/drs/extra/Datasets/mvi_data/MVI'\n\n\n# read mvi csv file\ncsv_file = open(os.path.join(os.path.dirname(path), 'mvi.csv'), 'r')\ncsv_reader = csv.reader(csv_file)\nmvi = {}\nfor item in csv_reader:\n if csv_reader.line_num == 1:\n continue\n mvi[item[0]] = item[1]\ncsv_file.close()\n\nlogger.info('the csv file has been loaded')\n\n\n# build a directory to save images\nimage_dir = os.path.join(os.path.dirname(path), '3phase_image')\nif not os.path.exists(image_dir):\n os.mkdir(image_dir)\n\n\n# build a directory to save npy\nnpy_dir = os.path.join(os.path.dirname(path), '3phase_npy')\nif not os.path.exists(npy_dir):\n os.mkdir(npy_dir)\n\n\n# get the case path \ncase_list = glob.glob(path + '/*/*')\nlogger.info(case_list)\nfor case_path in case_list:\n logger.info(case_path)\n\n # get the art nrrd file\n idx = -1\n item_list = os.listdir(case_path)\n item_list.sort()\n for item in item_list:\n idx += 1\n if item.split('_')[-1] == 'ART.nrrd':\n art_mask_slice = idx\n elif item.split('_')[-1] == 'NC.nrrd':\n nc_mask_slice = idx \n elif item.split('_')[-1] == 'PV.nrrd':\n pv_mask_slice = idx \n\n # get three phase image and mask path\n art_image_path = case_path + '/' + item_list[art_mask_slice - 1]\n art_mask_path = case_path + '/' + item_list[art_mask_slice]\n nc_image_path = case_path + '/' + item_list[nc_mask_slice - 1]\n nc_mask_path = case_path + '/' + item_list[nc_mask_slice]\n pv_image_path = case_path + '/' + item_list[pv_mask_slice - 1]\n pv_mask_path = case_path + '/' + item_list[pv_mask_slice]\n\n # get art image array\n art_array = nib.load(art_image_path).get_data() # channel last\n nc_array = nib.load(nc_image_path).get_data() # channel last\n pv_array = nib.load(pv_image_path).get_data() # channel last\n\n # get art mask array\n art_mask_array, _ = nrrd.read(art_mask_path)\n art_mask_slice = 
list(set(np.nonzero(art_mask_array)[-1]))\n\n    # use the maximum connectivity method to find the largest slice\n    largest_area = 0\n    largest_slice = 0\n    for idx in art_mask_slice:\n        img_labeled = measure.label(art_mask_array[:, :, idx], connectivity=2)\n        prop = measure.regionprops(img_labeled)\n        area = prop[0].area\n        if area > largest_area:\n            largest_area = area \n            largest_slice = idx \n            # keep the bounding box of the largest region found so far\n            bbox = prop[0].bbox\n    \n    art_bbox_mask = np.zeros((art_mask_array.shape[0], art_mask_array.shape[1]))\n    length = int((bbox[2] - bbox[0]) * 0.1)\n    height = int((bbox[3] - bbox[1]) * 0.1)\n    art_bbox_mask[(bbox[0] - length): (bbox[2] + length + 1), (bbox[1] - height): (bbox[3] + height + 1)] = 1\n    \n    for i in art_mask_slice:\n        art_slice_array = art_array[:, :, i]\n        nc_slice_array = nc_array[:, :, i]\n        pv_slice_array = pv_array[:, :, i]\n\n        # get mean and std of one slice\n        art_mean = art_slice_array.mean()\n        art_std = art_slice_array.std()\n        nc_mean = nc_slice_array.mean()\n        nc_std = nc_slice_array.std()\n        pv_mean = pv_slice_array.mean()\n        pv_std = pv_slice_array.std()\n\n        # get the lower and upper bound \n        # mean+-3*std get 99.73% data\n        art_lower = np.percentile(art_slice_array, 0.14)\n        art_upper = np.percentile(art_slice_array, 99.86)\n        nc_lower = np.percentile(nc_slice_array, 0.14)\n        nc_upper = np.percentile(nc_slice_array, 99.86)\n        pv_lower = np.percentile(pv_slice_array, 0.14)\n        pv_upper = np.percentile(pv_slice_array, 99.86)\n\n        # truncate the array\n        art_slice_array[art_slice_array < art_lower] = art_lower \n        art_slice_array[art_slice_array > art_upper] = art_upper\n        nc_slice_array[nc_slice_array < nc_lower] = nc_lower\n        nc_slice_array[nc_slice_array > nc_upper] = nc_upper\n        pv_slice_array[pv_slice_array < pv_lower] = pv_lower\n        pv_slice_array[pv_slice_array > pv_upper] = pv_upper\n\n        # do normalization\n        art_slice_array = art_slice_array.astype(dtype=np.float32)\n        art_slice_array = (art_slice_array - art_mean) / art_std \n        nc_slice_array = nc_slice_array.astype(dtype=np.float32)\n        nc_slice_array = (nc_slice_array - nc_mean) / nc_std \n        pv_slice_array = pv_slice_array.astype(dtype=np.float32)\n        pv_slice_array = (pv_slice_array - pv_mean) / pv_std\n\n        # get the roi area\n        art_roi_array = np.multiply(art_slice_array, art_bbox_mask)\n        nc_roi_array = np.multiply(nc_slice_array, art_bbox_mask)\n        pv_roi_array = np.multiply(pv_slice_array, art_bbox_mask)\n\n        # set the name of image and npy\n        id_num = case_path.split('/')[-2]\n        art_image_name = id_num + '_' + str(i) + '_art_' + mvi[id_num] + '.jpg'\n        art_npy_name = id_num + '_' + str(i) + '_art_' + mvi[id_num] + '.npy'\n        nc_image_name = id_num + '_' + str(i) + '_nc_' + mvi[id_num] + '.jpg'\n        nc_npy_name = id_num + '_' + str(i) + '_nc_' + mvi[id_num] + '.npy'\n        pv_image_name = id_num + '_' + str(i) + '_pv_' + mvi[id_num] + '.jpg'\n        pv_npy_name = id_num + '_' + str(i) + '_pv_' + mvi[id_num] + '.npy'\n        \n        # save the array to file\n        imageio.imwrite(os.path.join(image_dir, art_image_name), art_roi_array)\n        imageio.imwrite(os.path.join(image_dir, nc_image_name), nc_roi_array)\n        imageio.imwrite(os.path.join(image_dir, pv_image_name), pv_roi_array)\n        np.save(os.path.join(npy_dir, art_npy_name), art_roi_array)\n        np.save(os.path.join(npy_dir, nc_npy_name), nc_roi_array)\n        np.save(os.path.join(npy_dir, pv_npy_name), pv_roi_array)\n    ","sub_path":"data/get3phase.py","file_name":"get3phase.py","file_ext":"py","file_size_in_byte":6200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
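The get3phase.py record above clips each slice to its 0.14th/99.86th percentiles (roughly mean +/- 3*std for normally distributed intensities) before z-scoring with the slice's original mean and std. A minimal standalone sketch of that normalization step, assuming a 2-D numpy slice; the function name and the usage line are illustrative, not part of the original script:

import numpy as np

def normalize_ct_slice(slice_arr):
    # Moments are taken before truncation, mirroring the script above.
    mean, std = slice_arr.mean(), slice_arr.std()
    # Keep the central 99.73% of intensities (approx. mean +/- 3*std).
    lower = np.percentile(slice_arr, 0.14)
    upper = np.percentile(slice_arr, 99.86)
    clipped = np.clip(slice_arr, lower, upper).astype(np.float32)
    return (clipped - mean) / std

# e.g.: normalized = normalize_ct_slice(art_array[:, :, i])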
+{"seq_id":"350272909","text":"import turtle\r\nimport time\r\nimport random\r\n\r\ndelay=0.1\r\n\r\n#score\r\nscore=0\r\nplayerB=0\r\n\r\n#set up the screen\r\nwn=turtle.Screen()\r\nwn.title(\"snake game by soumik\")\r\nwn.bgcolor(\"green\")\r\nwn.setup(width=600,height=600)\r\nwn.tracer(0)#turn off the screen updates\r\n\r\n#snake head\r\nhead=turtle.Turtle()\r\nhead.speed(0)\r\nhead.shape(\"square\")\r\nhead.color(\"black\")\r\nhead.penup()\r\nhead.goto(100,0)\r\nhead.direction=\"stop\"\r\n#snake head2\r\nhead2=turtle.Turtle()\r\nhead2.speed(0)\r\nhead2.shape(\"square\")\r\nhead2.color(\"white\")\r\nhead2.penup()\r\nhead2.goto(-100,0)\r\nhead2.direction=\"stop\"\r\n\r\n\r\n\r\n#snake food\r\nfood=turtle.Turtle()\r\nfood.speed(0)\r\nfood.shape(\"circle\")\r\nfood.color(\"red\")\r\nfood.penup()\r\nfood.goto(0,100)\r\n\r\nsegments=[]\r\nsegs=[]\r\n\r\n#pen\r\npen=turtle.Turtle()\r\npen.speed(0)\r\npen.shape(\"square\")\r\npen.color(\"white\")\r\npen.penup()\r\npen.hideturtle()\r\npen.goto(0,260)\r\npen.write(\"score: 0 playerB: 0\",align=\"center\", font=(\"courier\",24,\"normal\"))\r\n\r\n#functions\r\ndef go_up():\r\n if head.direction !=\"down\":\r\n head.direction=\"up\"\r\ndef go_down():\r\n if head.direction !=\"up\":\r\n head.direction=\"down\"\r\ndef go_left():\r\n if head.direction !=\"right\":\r\n head.direction=\"left\"\r\ndef go_right():\r\n if head.direction !=\"left\":\r\n head.direction=\"right\"\r\n#functions\r\ndef go_upa():\r\n if head2.direction !=\"down\":\r\n head2.direction=\"up\"\r\ndef go_downa():\r\n if head2.direction !=\"up\":\r\n head2.direction=\"down\"\r\ndef go_lefta():\r\n if head2.direction !=\"right\":\r\n head2.direction=\"left\"\r\ndef go_righta():\r\n if head2.direction !=\"left\":\r\n head2.direction=\"right\"\r\n\r\n\r\n\r\n\r\ndef move():\r\n if head.direction==\"up\":\r\n y=head.ycor()\r\n head.sety(y+20)\r\n if head.direction==\"down\":\r\n y=head.ycor()\r\n head.sety(y-20)\r\n if head.direction==\"left\":\r\n x=head.xcor()\r\n head.setx(x-20)\r\n if head.direction==\"right\":\r\n x=head.xcor()\r\n head.setx(x+20)\r\ndef move2():\r\n if head2.direction==\"up\":\r\n y=head2.ycor()\r\n head2.sety(y+20)\r\n if head2.direction==\"down\":\r\n y=head2.ycor()\r\n head2.sety(y-20)\r\n if head2.direction==\"left\":\r\n x=head2.xcor()\r\n head2.setx(x-20)\r\n if head2.direction==\"right\":\r\n x=head2.xcor()\r\n head2.setx(x+20)\r\n\r\n\r\n#keyboard bindings\r\nwn.listen()\r\nwn.onkeypress(go_up,\"Up\")\r\nwn.onkeypress(go_down,\"Down\")\r\nwn.onkeypress(go_left,\"Left\")\r\nwn.onkeypress(go_right,\"Right\")\r\n#keyboard bindings\r\nwn.listen()\r\nwn.onkeypress(go_upa,\"w\")\r\nwn.onkeypress(go_downa,\"s\")\r\nwn.onkeypress(go_lefta,\"a\")\r\nwn.onkeypress(go_righta,\"d\")\r\n\r\n#main game loop\r\nwhile True:\r\n wn.update()\r\n\r\n #check for collision wiht the border\r\n if head.xcor()>290 or head.xcor()<-290 or head.ycor()>290 or head.ycor()<-290:\r\n time.sleep(1)\r\n head.direction=\"stop\"\r\n if head2.xcor()>290 or head2.xcor()<-290 or head2.ycor()>290 or head2.ycor()<-290:\r\n time.sleep(1)\r\n head2.direction=\"stop\"\r\n\r\n #hide the segments\r\n for segment in segments:\r\n segment.goto(1000,1000)\r\n for seg in segs:\r\n seg.goto(1000,1000)\r\n\r\n #clear the segments list\r\n segments.clear()\r\n segs.clear()\r\n #reset the score\r\n score=0\r\n\r\n #reset the delay\r\n delay=0.1\r\n\r\n pen.clear()\r\n pen.write(\"score:{} high score: {}\".format(score, playerB),align=\"center\",font=(\"courier\",24,\"normal\")) \r\n \r\n #check for colission 
with the food\r\n if head.distance(food)<19:\r\n x=random.randint(-290,290)\r\n y=random.randint(-290,290)\r\n food.goto(x,y)\r\n #check for colission with the food\r\n if head2.distance(food)<19:\r\n x=random.randint(-290,290)\r\n y=random.randint(-290,290)\r\n food.goto(x,y)\r\n \r\n\r\n #add a segment\r\n new_segment=turtle.Turtle()\r\n new_segment.speed(0)\r\n new_segment.shape(\"square\")\r\n new_segment.color(\"black\")\r\n new_segment.penup()\r\n segments.append(new_segment)\r\n #add a seg\r\n new_seg=turtle.Turtle()\r\n new_seg.speed(0)\r\n new_seg.shape(\"square\")\r\n new_seg.color(\"white\")\r\n new_seg.penup()\r\n segs.append(new_seg)\r\n\r\n\r\n #shorten the display\r\n delay-=0.001\r\n\r\n #increase the score\r\n score+=1\r\n\r\n if score>playerB:\r\n playerB=score\r\n pen.clear()\r\n pen.write(\"score:{} high score: {}\".format(score, playerB),align=\"center\",font=(\"courier\",24,\"normal\"))\r\n\r\n #move the end segments first in reverse order\r\n for index in range(len(segments)-1,0,-1):\r\n x=segments[index-1].xcor()\r\n y=segments[index-1].ycor()\r\n segments[index].goto(x,y)\r\n #move2 the end segs first in reverse order\r\n for index in range(len(segs)-1,0,-1):\r\n x=segs[index-1].xcor()\r\n y=segs[index-1].ycor()\r\n segs[index].goto(x,y)\r\n\r\n\r\n #move segment 0 to where the head is\r\n if len(segments)>0:\r\n x=head.xcor()\r\n y=head.ycor()\r\n segments[0].goto(x,y)\r\n move()\r\n #move2 seg 0 to where the head2 is\r\n if len(segs)>0:\r\n x=head2.xcor()\r\n y=head2.ycor()\r\n segs[0].goto(x,y)\r\n move2()\r\n\r\n #move2 the end segs first in reverse order\r\n for index in range(len(segs)-1,0,-1):\r\n x=segs[index-1].xcor()\r\n y=segs[index-1].ycor()\r\n segs[index].goto(x,y)\r\n #move2 the end segs first in reverse order\r\n for index in range(len(segs)-1,0,-1):\r\n x=segs[index-1].xcor()\r\n y=segs[index-1].ycor()\r\n segs[index].goto(x,y)\r\n\r\n\r\n #check for head colission\r\n for segment in segments:\r\n if segment.distance(head)<20:\r\n time.sleep(1)\r\n head.goto(0,0)\r\n head.direction=\"stop\"\r\n #check for head2 colission\r\n for seg in segs:\r\n if seg.distance(head2)<20:\r\n time.sleep(1)\r\n head2.goto(0,0)\r\n head2.direction=\"stop\"\r\n\r\n\r\n\r\n #hide the segments\r\n for segment in segments:\r\n segment.goto(1000,1000)\r\n #hide the segs\r\n for seg in segs:\r\n seg.goto(1000,1000)\r\n \r\n #clear the segments list\r\n segments.clear()\r\n segs.clear()\r\n\r\n #reset the score\r\n score=0\r\n #reset the delay\r\n delay=0.1\r\n #update the score display\r\n pen.clear()\r\n pen.write(\"score:{} high score: {}\".format(score, playerB),align=\"center\",font=(\"courier\",24,\"normal\"))\r\n \r\n time.sleep(delay)\r\n\r\nwn.mainloop()\r\n\r\n","sub_path":"two players snake game.py","file_name":"two players snake game.py","file_ext":"py","file_size_in_byte":6336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"122500425","text":"import turtle\nfrom turtle import Turtle\nimport time\nimport random\nimport math\nfrom threading import Thread\n\n#turtle.goto()\n\n\nturtle.hideturtle()\nturtle.tracer(delay=0)\nturtle.setup(800,600)\nturtle.bgcolor(\"BLACK\")\n\n\nclass Ball(Turtle):\n def __init__(self,x,y,color):\n Turtle.__init__(self)\n self.x=x\n self.y=y\n self.penup()\n self.goto(x, y)\n self.r=20\n self.color(color)\n self.shape(\"circle\")\n self.shapesize(2)\n num=0\n num=random.randint(0,2)\n if num == 0:\n self.color(\"red\") \n elif num==1:\n self.color(\"green\")\n elif num==2:\n 
self.color(\"blue\")\n\nx=-420\ny=250\nBalls=[]\nfor i in range(3):\n while(x<=380): \n x=x+40\n color= (random.random(), random.random(), random.random())\n new = Ball(x,y,color)\n Balls.append(new)\n y-=40\n x=-420\n\n\nMY_BALL = Ball(0,0,\"black\")\nMY_BALL.goto(0,-250)\nMY_BALL.penup()\nturtle.penup()\n\n\n \n\ndef collide(ball_a,ball_b): \n d = math.sqrt(math.pow(ball_a.xcor()-ball_b.xcor(),2)+math.pow(ball_a.ycor()-ball_b.ycor(),2))\n if d <= ball_a.r + ball_b.r:\n return True\n \n else:\n return False\n \n \ndef check_color(ball_a,ball_b):\n if (ball_a.color()) == (ball_b.color()):\n return True\n \n else:\n return False\ndef collision_with_myball():\n for ball in Balls:\n if collide(MY_BALL,ball) and check_color(MY_BALL,ball):\n ball.reset()\n Balls.remove(ball)\n ball.ht()\n MY_BALL.reset()\n MY_BALL.pu()\n MY_BALL.goto(-1000, -1000)\n MY_BALL.ht()\n return True\n elif collide(MY_BALL, ball):\n return True\n return False\n\n\n\ndef create_new_ball():\n global MY_BALL\n Balls.append(MY_BALL)\n MY_BALL = Ball(0, -250, \"black\")\n MY_BALL.pu()\n turtle.update()\n\n\ndef my_ball_move(angle):\n MY_BALL.forward(angle)\n print(\"Finished left\")\n new_ball()\n\ndef angle(x, y):\n na = y / x\n an = math.atan(na)\n angle = math.degrees(an)\n \n if(angle < 0):\n angle = angle+ 180\n print(angle)\n MY_BALL.setheading(0)\n MY_BALL.left(angle)\n \n \n while True:\n MY_BALL.fd(10)\n if collision_with_myball():\n new_ball()\n break\n \n\ndef stop_game():\n for i in range(16):\n print(i)\n time.sleep(i)\n quit()\n\nt1 = Thread(target = stop_game)\nt1.start()\n\nturtle.getscreen().onclick(angle)\n\ndef ball_on_field():\n if len(Balls)<1:\n return False\n else:\n return True\n\ndef new_ball():\n global MY_BALL\n Balls.append(MY_BALL)\n MY_BALL = Ball(0,0,\"black\")\n MY_BALL.goto(0,-250)\n MY_BALL.penup()\n\n##\n##while True :\n## if (MY_BALL.ycor()>600):\n## new_ball()\n\n#collision_with_myball()\n#turtle.update()\n\n#while ball_on_field == True:\n #create_new_ball()\n #collision_with_myball()\n #my_ball_move(angle)\n #turtle.update()\n \n \n \n#while ball_on_field ==True:\n# print(\"good job \")\n","sub_path":"were_done.py","file_name":"were_done.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"539603154","text":"# -*- coding: utf-8 -*- \n'''\nCreated on 2017. 8. 
29.\n\n@author: jaehyeong\n'''\n# Compute the threshold automatically and further reduce the effect of lighting -> Adaptive Threshold (split the image into small blocks and compute a threshold for each block)\nimport numpy as np\nimport cv2\n\ndef adaptive_threshold():\n    imgfile = '../images/document.jpg'\n    img = cv2.imread(imgfile, cv2.IMREAD_GRAYSCALE)\n    \n    # Resize image\n    r = 600.0 / img.shape[0]\n    dim = (int(img.shape[1] * r), 600)\n    img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n    \n    # Blur image and apply adaptive threshold\n    # GaussianBlur(input img, (neighborhood kernel size), 0)\n    blur = cv2.GaussianBlur(img, (5, 5), 0)    # blurs with the mean of the neighboring pixels; the larger the kernel, the more the image is smeared\n    \n    # adaptiveThreshold(img, max threshold value, algo, algo, block size, constant subtracted from the neighborhood mean)\n    result_without_blur = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 21, 10)  # 21, 10 are typical values\n    #result_with_blur = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH, cv2.THRESH_BINARY, 21, 10)\n    result_with_blur = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 21, 10)\n    \n    cv2.imshow('Without Blur', result_without_blur)\n    cv2.imshow('With Blur', result_with_blur)\n    \n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n    cv2.waitKey(1)\n    \nif __name__ == '__main__':\n    adaptive_threshold()    \n    \n    ","sub_path":"OpenCV_namecard/ch04_scan_effect/threshold2.py","file_name":"threshold2.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"646162475","text":"import numpy as np\r\nimport pandas as pd\r\n\r\ndef log_returns(data, day_offset = 365, fill = False):\r\n    df = data.copy()\r\n    \r\n    if fill:\r\n        date_range = pd.date_range(df.index.min(), df.index.max(), freq = 'D')\r\n        df_pp = df.reindex(date_range).fillna(method = 'ffill')\r\n    else:\r\n        df_pp = df.copy()\r\n    \r\n    df_pp.index = df_pp.index + pd.DateOffset(days = day_offset)\r\n    df_pp.columns = [c + '_PP' for c in df_pp.columns]    \r\n    \r\n    growth_rates = df.join(df_pp, how = 'inner')\r\n    \r\n    for c in data.columns:\r\n        growth_rates[c] = np.log(growth_rates[c] / growth_rates[c + '_PP'])\r\n    \r\n    growth_rates = growth_rates.loc[:, data.columns]\r\n    \r\n    return growth_rates\r\n    \r\ndef get_sales(action):\r\n    sale = np.array(action)\r\n    sale[np.where(sale > 0)] = 0\r\n    sale = abs(sale)\r\n    \r\n    return sale\r\n\r\ndef get_purchases(action):\r\n    purchase = np.array(action)\r\n    purchase[np.where(purchase < 0)] = 0\r\n    \r\n    return purchase\r\n    \r\ndef get_current_prices(obs, n):\r\n    return obs[(n + 1): (n * 2 + 1)]\r\n\r\ndef get_positions(obs, n):\r\n    return obs[1: n + 1]\r\n\r\ndef get_cash_balance(obs):\r\n    return obs[0]\r\n\r\ndef get_portfolio_value(obs, n):\r\n    v = np.dot(get_positions(obs, n), get_current_prices(obs, n))\r\n    v += get_cash_balance(obs)\r\n    \r\n    return v","sub_path":"marketenv/common/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"471020981","text":"import numpy as np\nimport tensorflow as tf\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.applications.vgg16 import preprocess_input\nfrom keras.models import Sequential\nfrom keras.layers import Dropout, Flatten, Dense\nfrom keras import applications\nfrom keras.optimizers import SGD, Adam, RMSprop, Nadam\nimport csv\nfrom matplotlib import pyplot as plt\nimport datetime\nimport os\nimport pandas as pd\nimport shutil\nfrom keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, 
BatchNormalization, GlobalAveragePooling2D, Concatenate\nfrom sklearn.metrics import roc_curve, auc\nfrom keras import regularizers\n\n#gpu = tf.config.experimental.list_physical_devices('GPU')\n#tf.config.experimental.set_memory_growth(gpu[0], True)\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = tf.Session(config=config)\n\ndef load_labels(csv_file):\n labels = []\n image_name = []\n with open(csv_file, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n labels.append(float(row[1]))\n image_name.append(row[2])\n return labels, image_name\n \n\ndef load_predictions(csv_file):\n labels = []\n image_name = []\n with open(csv_file, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n labels.append(row[1])\n image_name.append(row[2])\n \n return labels, image_name\n\n\ndef copy_files(initial_dir, final_dir):\n subfolders_initial = os.listdir(initial_dir)\n subfolder_final = os.listdir(final_dir)\n for folder in subfolders_initial:\n image_list = os.listdir(initial_dir + folder)\n for image in image_list:\n file_name = ''.join([initial_dir, folder, '/', image])\n destination = ''.join([final_dir, folder, '/', image])\n print(file_name)\n shutil.copyfile(file_name, destination)\n\n\ndef load_pictures_1(directory):\n directory = directory\n lista = [f for f in os.listdir(directory)]\n imgs = np.zeros([len(lista), 100, 100, 3])\n\n for i, image in enumerate(lista):\n img = misc.imread(''.join([directory, image]))\n if np.array_equal(np.shape(img), (100, 100, 3)):\n imgs[i] = img\n else:\n img = transform.resize(img, (100, 100, 3))\n imgs[i] = img\n\n array = np.array(imgs)\n array.reshape(len(imgs), 100, 100, 3)\n # return np.array(imgs[:])\n return array, lista\n\n\ndef calculate_auc_and_roc(predicted, real, plot=False):\n y_results, names = load_predictions(predicted)\n y_2test, names_test = load_labels(real)\n\n # y_results, names = gf.load_predictions('Inception_predictions.csv')\n # y_2test, names_test = gf.load_labels('Real_values_test.csv')\n y_test = []\n y_pred = []\n\n print(len(y_results), len(names))\n print(len(y_2test), len(names_test))\n\n for i, name in enumerate(names):\n for j, other_name in enumerate(names_test):\n if name == other_name:\n y_pred.append(float(y_results[i]))\n y_test.append(int(y_2test[j]))\n\n fpr_keras, tpr_keras, thresholds_keras = roc_curve(y_test, y_pred)\n\n auc_keras = auc(fpr_keras, tpr_keras)\n\n if plot is True:\n plt.figure()\n plt.plot([0, 1], [0, 1], 'k--')\n plt.plot(fpr_keras, tpr_keras, label='Keras (area = {:.3f})'.format(auc_keras))\n # plt.plot(fpr_rf, tpr_rf, label='RF (area = {:.3f})'.format(auc_rf))\n plt.xlabel('False positive rate')\n plt.ylabel('True positive rate')\n plt.title('ROC curve')\n plt.legend(loc='best')\n\n return auc_keras\n\n\ndef main(train_data_dir, validation_data_dir, test_data_dir_1, idx=0, value=0.001, plot=False):\n # ------------------------directories of the datasets -------------------------------\n\n\n # ---------------- load a base model --------------------------\n\n img_width, img_height = 150, 150\n ROWS = img_width\n COLS = img_height\n \n train_idg = ImageDataGenerator(rescale = 1./255, \n fill_mode ='nearest')\n val_idg = ImageDataGenerator(rescale = 1./255, \n fill_mode ='nearest')\n test_idg = ImageDataGenerator(rescale = 1./255, \n fill_mode ='nearest')\n test_idg2 = ImageDataGenerator(rescale = 1./255, \n fill_mode ='nearest')\n \n #train_idg = ImageDataGenerator(preprocessing_function=preprocess_input)\n 
#val_idg = ImageDataGenerator(preprocessing_function=preprocess_input)\n #test_idg = ImageDataGenerator(preprocessing_function=preprocess_input)\n #test_idg2 = ImageDataGenerator(preprocessing_function=preprocess_input)\n\n # ------generators to feed the model----------------\n\n train_gen = train_idg.flow_from_directory(train_data_dir,\n target_size=(ROWS, COLS),\n batch_size = 50)\n\n validation_gen = val_idg.flow_from_directory(validation_data_dir,\n target_size=(ROWS, COLS),\n batch_size = 50)\n \n lenv_test1 = len(os.listdir(test_data_dir_1)) \n test_gen = test_idg.flow_from_directory(test_data_dir_1, \n target_size=(ROWS, COLS), \n shuffle=False,\n batch_size = 50)\n\n # build the VGG16 network\n base_model = applications.VGG16(include_top=False, weights='imagenet') \n base_model.trainable = False\n base_model.summary()\n \n # -----------here begins the important --------------------------\n nclass = len(train_gen.class_indices)\n model = Sequential() \n model.add(base_model)\n model.add(GlobalAveragePooling2D())\n #model.add(Flatten()) \n model.add(Dense(2048, activation='relu'))\n #model.add(Dense(2048, activation='relu'))\n #model.add(Dense(2048, activation='relu', kernel_regularizer=regularizers.l2(value)))\n model.add(Dense(nclass, activation='softmax'))\n\n # optimizers\n\n adam = Adam(lr=0.001)\n sgd = SGD(lr=0.001, momentum=0.9)\n rms = 'rmsprop'\n # train the model\n \n model.compile(optimizer=rms, loss='categorical_crossentropy', metrics=['accuracy'])\n model.summary() \n model.fit_generator(train_gen, \n epochs = 1,\n shuffle=1,\n steps_per_epoch = 50,\n validation_steps = 50,\n validation_data = validation_gen, \n verbose=1)\n \n for layer in base_model.layers[:-4]:\n layer.trainable = False\n\n for layer in base_model.layers[-4:]:\n layer.trainable = True\n \n\n model.compile(loss='categorical_crossentropy', \n optimizer=adam,\n metrics=['acc'])\n \n model.summary()\n\n\n history = model.fit_generator(train_gen, \n epochs = 2,\n shuffle=1,\n steps_per_epoch = 50,\n validation_steps = 50,\n validation_data = validation_gen, \n verbose=1)\n\n # --------------- evaluate the model -----------------\n\n val_idg = ImageDataGenerator(rescale = 1./255, \n fill_mode ='nearest')\n validation_gen = val_idg.flow_from_directory(validation_data_dir,\n target_size=(img_width, img_height),\n batch_size=50)\n\n evaluation = model.evaluate_generator(validation_gen, verbose=True, steps=10)\n print(evaluation, 'Validation dataset')\n\n test_idg = ImageDataGenerator(rescale = 1./255, \n fill_mode ='nearest')\n test_gen = test_idg.flow_from_directory(test_data_dir_1,\n target_size=(img_width, img_height),\n shuffle=False,\n batch_size = 269)\n\n evaluation_0 = model.evaluate_generator(test_gen, verbose=True, steps=1)\n print(evaluation_0, 'evaluation 0 dataset')\n\n\n###-----------------------lets make predictions-------------------\n predicts = model.predict_generator(test_gen, verbose = True, steps=1)\n \n #print(len(predicts))\n #print(predicts[:270])\n #print('second part')\n #print(predicts[270:])\n x_0 = [x[0] for x in predicts]\n x_1 = [x[1] for x in predicts]\n names = [os.path.basename(x) for x in test_gen.filenames]\n print(len(x_0), len(names))\n \n predicts = np.argmax(predicts, \n axis=1)\n label_index = {v: k for k,v in train_gen.class_indices.items()}\n predicts = [label_index[p] for p in predicts]\n\n df = pd.DataFrame(columns=['class_1', 'class_2', 'fname', 'over all'])\n df['fname'] = [os.path.basename(x) for x in test_gen.filenames]\n df['class_1'] = x_0\n df['class_2'] = 
x_1\n df['over all'] = predicts\n name_save_predictions_1 = ''.join(['predictions_VGG_', str(idx), '_', str(value), '_.csv']) \n df.to_csv(name_save_predictions_1, index=False)\n \n \n # -------------------------predictions on the validation set --------------------------\n\n test_idg2 = ImageDataGenerator(rescale = 1./255, \n fill_mode ='nearest')\n va_gen2 = test_idg2.flow_from_directory(validation_data_dir, \n target_size=(ROWS, COLS), \n shuffle=False,\n batch_size = 10) \n \n predict3 = model.predict_generator(va_gen2, verbose = True, steps=39)\n \n #print(len(predicts))\n #print(predicts[:270])\n #print('second part')\n #print(predicts[270:])\n x_0 = [x[0] for x in predict3]\n x_1 = [x[1] for x in predict3]\n names = [os.path.basename(x) for x in va_gen2.filenames[:4000]]\n print(len(x_0), len(names))\n \n predict3 = np.argmax(predict3, axis=1)\n label_index = {v: k for k,v in va_gen2.class_indices.items()}\n predict3 = [label_index[p] for p in predict3]\n \n #df = pd.DataFrame(columns=['class_1', 'class_2', 'fname', 'over all'])\n #df['fname'] = names\n #df['class_1'] = x_0\n #df['class_2'] = x_1\n #df['over all'] = predict3\n #name_save_predictions_3 = ''.join(['predictions_VGG_val_dataset', '_', str(idx), '_', str(value), '_.csv'])\n #df.to_csv(name_save_predictions_3, index=False)\n \n # -----------now lets calculate the AUC---------------------------------\n\n current_wroking_directory = os.getcwd()\n \n real_test = ''.join([current_wroking_directory, '/GNB2020/test_CapsNets/data/Real_values_test.csv'])\n auch_0 = calculate_auc_and_roc(name_save_predictions_1, real_test)\n print(auch_0, 'test dataset')\n \n #real_val = ''.join([current_wroking_directory, '/GNB2020/test_CapsNets/data/Real_values_validation.csv'])\n #auch_1 = calculate_auc_and_roc(name_save_predictions_2, real_val)\n #print(auch_1, 'validation dataset')\n\n\n # ----------------- save results ---------------------------\n\n today = datetime.datetime.strftime(datetime.datetime.today(), '%Y%m%d-%Hh%mm') \n model.save_weights(''.join(['weights_vgg_',today,'_dropout_',str(value),'_.h5']), True)\n\n with open(''.join(['Results_training', today,'_l2norm_', str(value), '_.csv']), 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter = ',' )\n writer.writerow(['Acc', 'Val_acc', 'Loss', 'Val_Loss'])\n for i, num in enumerate(history.history['acc']):\n writer.writerow([num, history.history['val_acc'][i], history.history['loss'][i], history.history['val_loss'][i]])\n\n if plot is True:\n plt.figure()\n \"\"\"\n plt.plot([0, 1], [0, 1], 'k--')real_val\n plt.plot(fpr_keras, tpr_keras, label='Keras (area = {:.3f})'.format(auc_keras))\n # plt.plot(fpr_rf, tpr_rf, label='RF (area = {:.3f})'.format(auc_rf))\n plt.xlabel('False positive rate')\n plt.ylabel('True positive rate')\n plt.title('ROC curve')\n plt.legend(loc='best')\n\n # Zoom in view of the upper left corner.\n plt.figure()\n plt.xlim(0, 0.2)\n plt.ylim(0.8, 1)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.plot(fpr_keras, tpr_keras, label='Keras (area = {:.3f})'.format(auc_keras))\n # plt.plot(fpr_rf, tpr_rf, label='RF (area = {:.3f})'.format(auc_rf))\n plt.xlabel('False positive rate')\n plt.ylabel('True positive rate')\n plt.title('ROC curve (zoomed in at top left)')\n plt.legend(loc='best')\n plt.show()\"\"\"\n\n\nif __name__ == \"__main__\":\n\n #initial_dir = '/home/jl/aerial_photos_plus/'\n #folders = os.listdir(initial_dir)\n current_wroking_directory = os.getcwd()\n test_directory= ''.join([current_wroking_directory, '/test_CapsNets/data/test/'])\n\n train_dir = 
''.join([current_wroking_directory, '/test_CapsNets/data/training_validation/training/'])\n    val_dir = ''.join([current_wroking_directory,'/test_CapsNets/data/training_validation/validation/'])\n\n    indx = 0\n    value = 0\n    main(train_dir, val_dir, test_directory, indx)\n\n\n","sub_path":"scripts/vgg/vgg_retrain.py","file_name":"vgg_retrain.py","file_ext":"py","file_size_in_byte":13331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"67090351","text":"from google.appengine.ext import db\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.api import users\nfrom datetime import datetime, timedelta, date\nfrom google.appengine.api import users\nimport string, re, hashlib, urllib, random\nimport re\n\ndef filter_tags(htmlstr):\n\n    re_cdata=re.compile('//<!\\[CDATA\\[[^>]*//\\]\\]>',re.I)\n    re_script=re.compile('<\\s*script[^>]*>[^<]*<\\s*/\\s*script\\s*>',re.I)\n    re_style=re.compile('<\\s*style[^>]*>[^<]*<\\s*/\\s*style\\s*>',re.I)\n    re_br=re.compile('<br\\s*?/?>')\n    re_h=re.compile('</?\\w+[^>]*>')\n    re_comment=re.compile('<!--[^>]*-->')\n    s=re_cdata.sub('',htmlstr)\n    s=re_script.sub('',s) \n    s=re_style.sub('',s)\n    s=re_br.sub('\\n',s)\n    s=re_h.sub('',s) \n    s=re_comment.sub('',s)\n    \n    blank_line=re.compile('\\n+')\n    s=blank_line.sub('\\n',s)\n    s=replaceCharEntity(s)\n    return s\n\n\ndef replaceCharEntity(htmlstr):\n    CHAR_ENTITIES={'nbsp':' ','160':' ',\n                'lt':'<','60':'<',\n                'gt':'>','62':'>',\n                'amp':'&','38':'&',\n                'quot':'\"','34':'\"',}\n    \n    re_charEntity=re.compile(r'&#?(?P<name>\w+);')\n    sz=re_charEntity.search(htmlstr)\n    while sz:\n        entity=sz.group()\n        key=sz.group('name')\n        try:\n            htmlstr=re_charEntity.sub(CHAR_ENTITIES[key],htmlstr,1)\n            sz=re_charEntity.search(htmlstr)\n        except KeyError:\n        \n            htmlstr=re_charEntity.sub('',htmlstr,1)\n            sz=re_charEntity.search(htmlstr)\n    return htmlstr\n\ndef repalce(s,re_exp,repl_string):\n    return re_exp.sub(repl_string,s)\n\nclass article(db.Model):\n\ttitle=db.TextProperty()\t\n\ttitlePIC=db.IntegerProperty()\n\tcontentPIC=db.IntegerProperty()\n\tcontent=db.TextProperty()\n\tdatetime=db.DateTimeProperty() \n\tdate=db.TextProperty()\n\ttime=db.TextProperty()\n\tauthor = db.UserProperty()\n\tid = db.IntegerProperty()\n\tdef setdatetime(self):\n\t\tself.datetime = datetime.utcnow() + timedelta(hours=+8)\n\t\tself.date = str(self.datetime)[0:10]\n\t\tself.time = str(self.datetime)[11:16]\t\n\t\nclass comment(db.Model):\n\tok = db.BooleanProperty()\n\tauthor = db.TextProperty()\n\tname = db.TextProperty()\n\temail = db.TextProperty()\n\tpic = db.TextProperty()\n\turl = db.TextProperty()\t\n\tcomment = db.TextProperty()\n\taid = db.IntegerProperty()\n\tatitle = db.TextProperty()\t\n\tid = db.IntegerProperty()\n\tdatetime = db.DateTimeProperty() \t\n\tdate=db.TextProperty()\n\ttime=db.TextProperty()\t\n\tdef setdatetime(self):\n\t\tself.datetime = datetime.utcnow() + timedelta(hours=+8)\n\t\tself.date = str(self.datetime)[0:10]\n\t\tself.time = str(self.datetime)[11:16]\t\n\tip = db.TextProperty()\t\n\t\nclass link(db.Model):\n\turl = db.TextProperty()\n\tid = db.IntegerProperty()\n\tname = db.TextProperty()\n\tdatetime = db.DateTimeProperty() \n\tdate=db.TextProperty()\n\ttime=db.TextProperty()\t\n\tdef setdatetime(self):\n\t\tself.datetime = datetime.utcnow() + timedelta(hours=+8)\n\t\tself.date = str(self.datetime)[0:10]\n\t\tself.time = str(self.datetime)[11:16]\n\t\t\n\t\nclass interview(db.Model):\n\tdatetime = db.DateTimeProperty() 
\n\tdate=db.TextProperty()\n\ttime=db.TextProperty()\n\tdef setdatetime(self):\n\t\tself.datetime = datetime.utcnow() + timedelta(hours=+8)\n\t\tself.date = str(self.datetime)[0:10]\n\t\tself.time = str(self.datetime)[11:16]\n\tip = db.TextProperty()\n\tid = db.IntegerProperty()\n\turl = db.TextProperty()\n\t\n\nclass show_page(webapp.RequestHandler):\n\tdef get(self):\t\n\t\tarts = article.all().order('-datetime')\t\t\n\t\trc_cmts = comment.all().order('-datetime').filter('ok = ', True).fetch(limit=5)\n\t\tlnks = link.all().order('datetime')\n\t\tparameters = {'lnks':lnks, 'rc_cmts':rc_cmts}\n\t\tif self.request.get('aid'):\t\t\t\t\n\t\t\tid = int(self.request.get('aid'))\n\t\t\tart = arts.filter('id =',int(id)).get()\n\t\t\tcmts = comment.all().order('datetime').filter('aid =',id).filter('ok = ', True)\n\t\t\tparameters['art'] = art\n\t\t\tparameters['cmts'] = cmts\n\t\t\thtml = template.render('article.html',parameters)\n\t\telse:\n\t\t\tif self.request.get('bid'):\n\t\t\t\tbid = int(self.request.get('bid'))\t\t\t\t\n\t\t\telse:\n\t\t\t\tbid = 0\n\t\t\tpageNb = 5\n\t\t\ttot = arts.count()\n\t\t\tif tot%pageNb == 0:\n\t\t\t\ttot = tot/pageNb-1\n\t\t\telse:\n\t\t\t\ttot = tot/pageNb\t\t\t\n\t\t\tarts = arts.fetch(pageNb, bid*pageNb)\t\n\t\t\tbid_new = bid-1\n\t\t\tbid_old = bid+1\n\t\t\tparameters['arts'] = arts\n\t\t\tparameters['bid'] = bid\n\t\t\tparameters['bid_new'] = bid_new\n\t\t\tparameters['bid_old'] = bid_old\n\t\t\tparameters['tot'] = tot\n\t\t\thtml = template.render('index.html',parameters)\n\t\tself.response.out.write(html)\n\t\t\nclass class_comment(webapp.RequestHandler):\n\tdef post(self):\n\t\tif self.request.get('author') and self.request.get('email') and self.request.get('comment'):\t\n\t\t\t\n\t\t\tcmt = comment()\n\t\t\tcmt.author = self.request.get('author')\n\t\t\tcmt.email = self.request.get('email')\n\t\t\t\t\t\n\t\t\tsize = 50\t\t\t\n\t\t\tcmt.pic = \"http://www.gravatar.com/avatar/\" + hashlib.md5(cmt.email.lower()).hexdigest() + \"?\"\n\t\t\tcmt.pic += urllib.urlencode({'d':'monsterid', 's':str(size)})\n\t\t\t\n\t\t\t\n\t\t\tif self.request.get('url'):\n\t\t\t\tcmt.url = self.request.get('url')\n\t\t\t\tcmt.name = '<a href=\"' + cmt.url + '\">' + cmt.author + '</a>'\n\t\t\telse:\n\t\t\t\tcmt.name = cmt.author\n\t\t\t\t\n\t\t\tcmt.comment = filter_tags(self.request.get('comment'))\n\t\t\tcmt.aid = int(self.request.get('aid'))\n\t\t\tcmt.atitle = article.all().filter('id =',cmt.aid).get().title\n\t\t\tcmt.setdatetime()\n\t\t\tcmt.id = comment.all().count()+1\n\t\t\tcmt.ip = self.request.remote_addr\n\t\t\tcmt.ok = False\n\t\t\tcmt.put()\t\t\t\n\t\t\tself.redirect(\"?aid=\"+str(cmt.aid))\n\t\telse:\t\t\t\n\t\t\tself.response.out.write('please fill in the required comment fields')\n\t\t\t\nclass redirect_404(webapp.RequestHandler):\n\tdef get(self):\t\n\t\tself.redirect(\"404\")\n\t\t\nclass show_404(webapp.RequestHandler):\n\tdef get(self):\n\t\tself.response.set_status(404)\n\t\thtml = template.render('404.html',{})\n\t\tself.response.out.write(html)\n\t\t\napplication = webapp.WSGIApplication([\n\t\t\t\t\t\t\t\t\t ('/comment', class_comment),\n\t\t\t\t\t\t\t\t\t ('/', show_page),\n\t\t\t\t\t\t\t\t\t ('/404', show_404),\n\t\t\t\t\t\t\t\t\t ('/.*', redirect_404),\n\t\t\t\t\t\t\t\t\t\t],\n                                     debug=True)\ndef main():\n    run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n    main()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
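A short usage sketch for the filter_tags helper in the record above, assuming the reconstructed tag-stripping patterns; the sample HTML string and the expected output are illustrative:

# filter_tags strips CDATA, script and style blocks, then the remaining
# tags, then decodes the character entities via replaceCharEntity.
sample = '<p>Hello&nbsp;<b>world</b></p><script type="text/javascript">alert(1)</script>'
print(filter_tags(sample))
# The script element is removed whole, the surviving tags are stripped,
# and &nbsp; is decoded, leaving roughly 'Hello world'.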
+{"seq_id":"615375304","text":"import difflib\nimport json\nfrom unittest import TestCase\n\nimport networkx as nx\n\nfrom examples.sales import schema\nfrom star_alchemy.contrib.networkx import to_nx\n\n\nclass ToNxTestCase(TestCase):\n\n def test_to_nx(self):\n G = to_nx(schema)\n actual = json.dumps(nx.tree_data(G, schema.name), indent=4)\n expected = json.dumps(indent=4, obj={\n 'name': 'sale',\n 'id': 'sale',\n 'children': [\n {\n 'name': 'product',\n 'id': 'product',\n 'children': [\n {'name': 'category', 'id': 'category'}\n ]\n },\n {\n 'name': 'employee',\n 'id': 'employee',\n 'children': [\n {'name': 'department', 'id': 'department'},\n {'name': 'employee_location', 'id': 'employee_location'}\n ]\n },\n {\n 'name': 'customer',\n 'id': 'customer',\n 'children': [\n {'name': 'customer_location', 'id': 'customer_location'}\n ]\n }\n ]\n })\n if actual != expected:\n actual_lines = actual.split('\\n')\n expected_lines = expected.split('\\n')\n diff = difflib.unified_diff(actual_lines, expected_lines)\n self.fail('\\n'.join(diff))\n","sub_path":"tests/test_contrib_networkx.py","file_name":"test_contrib_networkx.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"381856015","text":"#! /usr/bin/env python3\n#---------------------------------------------------------------------------\n\nimport logging\nimport threading\nimport argparse\n\nimport json\nimport base64, binascii\nimport struct\n\nimport select\nimport socket\nimport time\nimport sched\n\nimport warnings\nimport requests\n\nimport bottle # pip install bottle || wget https://bottlepy.org/bottle.py\nfrom bottle import post, request, response\n\ntry:\n import tornado.ioloop\n import tornado.web\n with_tornado = True\nexcept:\n print(\"cannot import tornado\")\n with_tornado = False\n\nimport sys\nsys.path.append(\"../../PLIDO-tanupoo\")\nimport fragment\n\nfrom fraglogic import WindowAckModeSender\n\n#---------------------------------------------------------------------------\n\ndef bytes_to_hex(data, with_new_line=False, with_repr=True):\n result = \"\"\n for i,b in enumerate(bytearray(data)):\n if i == 0:\n pass\n elif i%16 == 0:\n if with_new_line:\n result += \"\\n\"\n else: result += \" \"\n else: result += \" \"\n result += \"%02x\" % b\n if with_repr:\n result += \" \"+repr(data)\n return result\n\n#---------------------------------------------------------------------------\n\nclass FragmentationManager:\n \"\"\"The fragmentation manager handles the logic of the fragment sending etc.\n \"\"\"\n \n def __init__(self):\n self.nb_bit_bitmap = 1\n self.max_fcn_per_window = self.nb_bit_bitmap - 1 # included\n \n self.window = 0\n self.fcn = self.max_fcn_per_window # protocol FCN\n self.fragment_index = 0 #\n self.content = None\n self.state = \"init\"\n\n def event_packet(self, raw_packet):\n if self.state == \"init\":\n print(\"(ignored) Dev. 
packet:\", repr(raw_packet))\n self.state = \"fragment\"\n self.content = [\"to be\", \"or not to\", \" be, that's\", \"the question\"]\n return self.get_current_fragment()\n elif self.state == \"fragment\":\n return self.process_ack(raw_packet)\n else: raise ValueError(\"bad state\", self.state)\n\n def get_current_fragment(self):\n print(\"fragment window={} fcn={} current_frag_index={}\".format(\n self.window, self.fcn, self.fragment_index))\n header = struct.pack(b\"!BB\", self.window, self.fcn)\n return header + bytes(self.content[self.fragment_index].encode(\"ascii\"))\n\n def process_ack(self, raw_packet):\n print(\"process_ack\", bytes_to_hex(raw_packet))\n if len(raw_packet) != struct.calcsize(\"!BB\"):\n print(\"XXX: bad ack size\", len(raw_packet))\n return b\"XXX:bad\"\n window, bitmap = struct.unpack(\"!BB\", raw_packet)\n print(\"window={}, bitmap={}\".format(window, bitmap))\n if window != self.window:\n print(\"warning: bad window number\", window, self.window)\n return b\"XXX:bad-window\"\n if bitmap != 1: #XXX\n print(\"warning: incomplete bitmap\", bitmap, 1)\n return b\"XXX:bad-bitmap\"\n\n # Next fragment\n self.window = (self.window+1) % 2 # protocol\n # - because it will be the first of the new window:\n self.fcn = self.max_fcn_per_window \n self.fragment_index += 1 # internal data structure\n\n if self.fragment_index == len(self.content):\n print(\"Finished trasnmission of fragments\")\n return b\"\"\n\n if self.fragment_index == len(self.content)-1:\n # protocol - because it is the end of the content in this case:\n self.fcn = 1 \n return self.get_current_fragment() # XXX + \"MIC\"\n else:\n return self.get_current_fragment()\n\n#---------------------------------------------------------------------------\n\n# FRAGMENT_FORMAT = {\n# # 0|0|12345678|12345678\n# \"hdr_size\": 16,\n# \"rid_size\": 0,\n# \"rid_shift\": 0,\n# \"rid_mask\": 0x0000,\n# \"dtag_size\": 0,\n# \"dtag_shift\": 0,\n# \"dtag_mask\": 0x0000,\n# \"win_size\": 1,\n# \"win_shift\": 8,\n# \"win_mask\": 0x0100,\n# \"fcn_size\": 1,\n# \"fcn_shift\": 0,\n# \"fcn_mask\": 0x01,\n# }\n\nFRAGMENT_FORMAT = fragment.fp_ietf100_win\n\nclass SystemManager:\n def add_event(self, rel_time, callback, args):\n XXX\n\n def send_packet(self, packet):\n XXX\n\n# {'srcbuf': b'The crow has flown away:\\nswaying in the evening sun,\\naleafless tree.', 'max_fcn': 255, 'win_size': 255, 'win_mask': 57896044618658097711785492504343953926634992332820282019728792003956564819967, 'fcn': 255, 'end_of_fragment': 255, 'base_hdr': 256}\n\n\n\nclass SimulSystemManager:\n def __init__(self):\n self.scheduler = sched.scheduler(self.get_clock, self.wait_delay)\n self.clock = 0\n self.receive_packet_callback = None\n self.inject_receive_list = []\n \n # sched.scheduler API\n \n def get_clock(self):\n return self.clock\n\n def wait_delay(self, delay):\n self.clock += delay\n\n def run(self):\n self.scheduler.run()\n\n # external API\n \n def add_event(self, rel_time, callback, args):\n self.scheduler.enter(rel_time, 0, callback, args)\n\n def send_packet(self, packet):\n #print(\"SEND:\", bytes_to_hex(packet))\n print(\"SEND\", packet)\n if len(self.inject_receive_list) > 0:\n inject_packet = self.inject_receive_list.pop(0)\n if self.receive_packet_callback != None:\n print(\"injected packet:\", inject_packet)\n self.add_event(0, self.receive_packet_callback,\n (inject_packet,))\n\n def set_receive_packet_callback(self, callback):\n self.receive_packet_callback = callback\n\n def set_inject_receive_list(self, packet_list):\n 
self.inject_receive_list = packet_list[:]\n \n \nclass RealTimeSystemManager:\n \"\"\"\n Manage event queue in real time\n Send and receive packet from an UDP port\n \"\"\"\n def __init__(self, dest_address_and_port, listen_port=None, time_scale=1):\n self.time_t0 = time.time()\n self.time_scale = time_scale\n self.receive_packet_callback = None\n self.scheduler = sched.scheduler(self.get_clock, self.wait_delay)\n self.destination = dest_address_and_port\n \n self.sd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n if listen_port == None:\n unused, listen_port = dest_address_and_port\n print(\"UDP listening on port {}\".format(listen_port))\n self.sd.bind((\"\", listen_port))\n \n self.add_event(1e8, \"should not be called\", ())\n self.inject_receive_done = False\n self.inject_receive_list = []\n\n def set_inject_receive_list(self, packet_list):\n self.inject_receive_list = packet_list[:]\n\n def set_receive_packet_callback(self, callback):\n self.receive_packet_callback = callback\n \n def get_clock(self):\n return self.time_t0 + (time.time()-self.time_t0) * self.time_scale\n\n def wait_delay(self, delay):\n #delay *= self.time_scale\n # Note: we might wait for less time than expected if packet received\n if len(self.inject_receive_list) > 0 and not self.inject_receive_done:\n inject_packet = self.inject_receive_list.pop(0)\n self.inject_receive_done = True\n if inject_packet != None and self.receive_packet_callback != None:\n print(\"injected packet:\", inject_packet)\n self.receive_packet_callback(inject_packet)\n return\n\n read_list,unused,unused = select.select([self.sd],[],[], delay)\n self.inject_receive_done = False\n if len(read_list) > 0:\n assert read_list[0] is self.sd\n port = self.destination[1]\n packet, address_and_port = self.sd.recvfrom(2**16)\n if self.receive_packet_callback != None:\n self.receive_packet_callback(packet)\n\n def run(self):\n self.scheduler.run()\n \n def add_event(self, rel_time, callback, args):\n self.scheduler.enter(rel_time, 0, callback, args)\n\n def send_packet(self, packet):\n #print(\"SEND:\", bytes_to_hex(packet))\n print(\"SEND\", packet)\n self.sd.sendto(packet, self.destination)\n\n\ndef test_real_time_system_manager(args):\n system = RealTimeSystemManager((args.address, args.port), args.listen_port)\n start_time = time.time()\n def periodic_display_function():\n elapsed_time = system.get_clock()-start_time\n print(\"current time={} - display\".format(elapsed_time))\n system.add_event(1.5, periodic_display_function, ())\n def periodic_send_function():\n elapsed_time = system.get_clock()-start_time\n print(\"current time={} - send\".format(elapsed_time))\n system.send_packet(\"\".format(elapsed_time))\n system.add_event(2, periodic_send_function, ())\n system.add_event(0, periodic_display_function, ())\n system.add_event(0, periodic_send_function, ()) \n system.run()\n\n \ndef test_window_ack_manager_internal():\n reply_list = [b\"\\x00\", b\"\\x01\"]\n \n simul_system_manager = SimulSystemManager()\n packet = b\"The crow has flown away:\\nswaying in the evening sun,\\naleafless tree.\"\n window_ack_manager = WindowAckModeSender(\n simul_system_manager, FRAGMENT_FORMAT, #fragment.fp,\n full_packet=packet, rule_id=0, dtag=0, window_size=7, fragment_size=4)\n\n def send_callback():\n #global window_ack_manager, simul_system_manager\n if len(reply_list) > 0:\n print(\"REPLY\")\n reply_packet = reply_list.pop(0)\n simul_system_manager.add_event(1, window_ack_manager.event_packet, (reply_packet,))\n simul_system_manager.send_callback = 
send_callback # XXX\n \n simul_system_manager.add_event(0, window_ack_manager.start, ())\n simul_system_manager.run()\n\ndef really_test_window_ack_manager(args, system):\n packet = ( b\"The crow has flown away: \"\n +b\"- swaying in the evening sun, \"\n +b\"- a leafless tree.\")\n window_ack_manager = WindowAckModeSender(\n system, FRAGMENT_FORMAT, #fragment.fp,\n full_packet=packet,\n rule_id=0, dtag=0,\n window_max_size=4, fragment_size=4)\n system.set_receive_packet_callback(window_ack_manager.event_packet)\n if args.inject:\n inject_receive_list = ([None]*12 + [b\"\\x00\\xfe\"] + [None]*20\n + [b\"\\x01\\xfe\"])\n system.set_inject_receive_list(inject_receive_list)\n system.add_event(0, window_ack_manager.start, ())\n system.run()\n\n \ndef test_window_ack_manager(args):\n system = RealTimeSystemManager((args.address, args.port),\n args.listen_port, args.time_scale)\n really_test_window_ack_manager(args, system)\n\ndef test_window_ack_manager_simul(args):\n system = SimulSystemManager() \n really_test_window_ack_manager(args, system)\n\n \n#---------------------------------------------------------------------------\n# POST packet processing\n\ndef process_packet(frag_manager, json_request):\n '''\n Processes one packet, in base64 in json_request[\"data\"]\n Returns the packet that should be sent back as a json structure, with \n at least {\"data\": , \"\"}\n '''\n post_request = json.loads(json_request)\n\n if \"data\" in post_request:\n raw_packet = binascii.a2b_base64(post_request[\"data\"])\n print(\">>>PACKET:\", bytes_to_hex(raw_packet))\n raw_reply_packet = frag_manager.event_packet(raw_packet)\n else:\n # This is a join\n print(\">>>>JOIN\")\n raw_reply_packet = b\"\"\n\n print(\"<< bottle is simpler (one file), problem is for scheduling timers\n# threading is probably needed\n\n@post('/')\ndef device_packet_handler():\n global frag_manager\n print(\"--- received data\")\n # https://stackoverflow.com/questions/14988887/reading-post-body-with-bottle-py\n response.set_header('Content-Type', 'application/json')\n raw_request = request.body.read()\n json_response = process_packet(frag_manager, raw_request)\n raw_response = json.dumps(json_response)\n return raw_response\n\n#bottle.run(host='localhost', port=3112, debug=True)\n\n#---------------------------------------------------------------------------\n# Tornado version\n\n# https://gist.github.com/cjgiridhar/3274687\ndef run_tornado(args):\n global frag_manager\n version = \"magicarpe\" if args.bis else \"green\"\n frag_manager = FragmentationManager(version)\n \n class Alive(tornado.web.RequestHandler):\n def get(self):\n self.write(\"server is alive\")\n \n class PostHandler(tornado.web.RequestHandler):\n def post(self):\n raw_request = self.request.body\n json_request = raw_request.decode(\"ascii\")\n json_response = process_packet(frag_manager, json_request)\n raw_response = json.dumps(json_response)\n self.write(raw_response)\n \n application = tornado.web.Application([\n (r\"/alive\", Alive),\n (r\"/\", PostHandler)\n ])\n\n application.listen(args.port, address=args.address)\n tornado.ioloop.IOLoop.instance().start()\n\n#---------------------------------------------------------------------------\n\ndef cmd_run_server(args):\n global frag_manager\n version = \"magicarpe\" if args.bis else \"green\"\n if not args.tornado:\n frag_manager = FragmentationManager(version) \n bottle.run(host=args.address, port=args.port, debug=args.debug)\n else:\n 
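# note (added): the tornado path constructs its own global FragmentationManager inside run_tornado()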
run_tornado(args)\n\n#---------------------------------------------------------------------------\n\ndef cmd_post(args):\n # http://docs.python-requests.org/en/master/user/quickstart/#make-a-request\n raw_packet = b\"hello-from-python\"\n packet_b64 = binascii.b2a_base64(raw_packet).decode(\"ascii\")\n s = json.dumps({\"data\":packet_b64, \"fport\":2})\n r = requests.post(\"http://{}:{}\".format(args.address, args.port), data = s)\n print(r.text)\n\n#---------------------------------------------------------------------------\n\ndef cmd_simple(args):\n # http://docs.python-requests.org/en/master/user/quickstart/#make-a-request\n if args.step == 0: raw_packet = b\"\\x00\\x00\"\n elif args.step == 1: raw_packet = b\"\\x00\\x01\"\n elif args.step == 2: raw_packet = b\"\\x01\\x01\"\n elif args.step == 3: raw_packet = b\"\\x00\\x01\"\n elif args.step == 4: raw_packet = b\"\\x01\\x01\"\n else: raise ValueError(\"unmanaged step\", args.step)\n\n packet_b64 = binascii.b2a_base64(raw_packet).decode(\"ascii\")\n s = json.dumps({\"data\":packet_b64, \"fport\":2})\n r = requests.post(\"http://{}:{}\".format(args.address, args.port), data = s)\n json_reply = json.loads(r.text)\n if \"data\" in json_reply:\n packet = binascii.a2b_base64(json_reply[\"data\"]).decode(\"ascii\")\n packet\n else: print(\"reply:\", r.text)\n\n#---------------------------------------------------------------------------\n\nparser = argparse.ArgumentParser()\nsubparsers = parser.add_subparsers(dest=\"command\")\n\nparser_server = subparsers.add_parser(\"server\", help=\"run as POST server\")\nparser_server.add_argument(\"--address\", default=\"0.0.0.0\")\nparser_server.add_argument(\"--port\", default=3112)\nparser_server.add_argument(\"--debug\", default=False, action=\"store_true\")\nparser_server.add_argument(\"--bis\", default=False, action=\"store_true\")\nparser_server.add_argument(\"--tornado\", default=False, action=\"store_true\")\n\nparser_post = subparsers.add_parser(\"post\", help=\"post a message\")\nparser_post.add_argument(\"--port\", default=3112)\nparser_post.add_argument(\"--address\", default=\"localhost\") \n\nparser_simple = subparsers.add_parser(\n \"simple\", help=\"send one step of simple fragmentation\")\nparser_simple.add_argument(\"--port\", default=3112)\nparser_simple.add_argument(\"--step\", type=int, default=0)\nparser_simple.add_argument(\"--address\", default=\"localhost\") \n\nparser_test_window_ack = subparsers.add_parser(\n \"simul-win-ack\", help=\"test window ack manager\")\nparser_test_window_ack.add_argument(\n \"--inject\", default=False, action=\"store_true\")\n\nparser_test_emul = subparsers.add_parser(\"test-emul\")\nparser_test_emul.add_argument(\"--address\", default=\"localhost\")\nparser_test_emul.add_argument(\"--port\", type=int, default=9999,\n help=\"destination port\")\nparser_test_emul.add_argument(\"--listen-port\", type=int, default=9999)\n\nparser_test_udp_window_ack = subparsers.add_parser(\n \"udp-win-ack\", help=\"test window ack manager\")\nparser_test_udp_window_ack.add_argument(\"--address\", default=\"localhost\")\nparser_test_udp_window_ack.add_argument(\n \"--port\", type=int, default=9999, help=\"destination port\")\nparser_test_udp_window_ack.add_argument(\"--data\", type=int, default=9999)\nparser_test_udp_window_ack.add_argument(\"--listen-port\", type=int, default=9999)\nparser_test_udp_window_ack.add_argument(\n \"--inject\", default=False, action=\"store_true\")\nparser_test_udp_window_ack.add_argument(\n \"--time-scale\", default=1, type=float) # not 
working\n\nargs = parser.parse_args()\n\nif args.command == \"server\":\n cmd_run_server(args)\nelif args.command == \"post\":\n cmd_post(args)\nelif args.command == \"simple\":\n cmd_simple(args)\nelif args.command == \"simul-win-ack\":\n test_window_ack_manager_simul(args)\nelif args.command == \"udp-win-ack\":\n test_window_ack_manager(args) \nelif args.command == \"test-emul\":\n test_real_time_system_manager(args)\nelse: raise ValueError(\"bad command name\", args.command)\n \n#---------------------------------------------------------------------------\n","sub_path":"src/old/fragserver.py","file_name":"fragserver.py","file_ext":"py","file_size_in_byte":17820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"495940142","text":"ss = \"Python is Easy. 그래서 programming이 재밌습니다.\"\n\nst = \"\"\n\nfor v in ss :\n\tif ord(v) >= ord('a') and ord(v) <= ord('z') :\n\t\tst = chr(ord(v)+ 32)\n\t\t\nprint(chr(66))\nprint(ss.upper(), ss.lower(), ss.swapcase(), ss.title(), sep=\"\\n\")","sub_path":"0508/string3.py","file_name":"string3.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"365739629","text":"import os.path\nimport tensorflow as tf\nimport helper\nimport warnings\nfrom distutils.version import LooseVersion\nimport project_tests as tests\n\n# Added imports\nimport time\nimport scipy\nfrom moviepy.editor import VideoFileClip\nimport numpy as np\n\n# General Parameters\nnum_classes = 2\nimage_shape = (160, 576)\ndata_dir = './data'\nruns_dir = './runs'\nkernel_initializer = 1e-2 # 2e-2\nkernel_regularizer = 1e-3 # 1e-2, 1e-3, 1e-4, 1e-5\nkernel_size = 4\nstrides = (2, 2)\npadding='same'\nk_prob = 0.95 # 0.8, 0.9, 1.0\nl_rate = 1e-3 # 1e-2, 1e-3, 1e-4\nepochs = 20 # 1, 2, 10, 20\nbatch_size = 5 # 1, 2, 4, 5\ninput_file = './driving.mp4'\noutput_file = './output.mp4'\n\ndef debug(layer, shape):\n ''' Debugging '''\n tf.Print(layer, shape)\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found. 
Please use a GPU to train your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\n\ndef load_vgg(sess, vgg_path):\n \"\"\"\n Load Pretrained VGG Model into TensorFlow.\n :param sess: TensorFlow Session\n :param vgg_path: Path to vgg folder, containing \"variables/\" and \"saved_model.pb\"\n :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)\n \"\"\"\n\n vgg_tag = 'vgg16'\n \n vgg_input_tensor_name = 'image_input:0'\n vgg_keep_prob_tensor_name = 'keep_prob:0'\n vgg_layer3_out_tensor_name = 'layer3_out:0'\n vgg_layer4_out_tensor_name = 'layer4_out:0'\n vgg_layer7_out_tensor_name = 'layer7_out:0'\n \n tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)\n graph = tf.get_default_graph()\n\n t_vgg_input_tensor_name = graph.get_tensor_by_name(vgg_input_tensor_name)\n t_vvgg_keep_prob_tensor_name = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)\n t_vgg_layer3_out_tensor_name = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)\n t_vgg_layer4_out_tensor_name = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)\n t_vgg_layer7_out_tensor_name = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)\n \n return t_vgg_input_tensor_name, t_vvgg_keep_prob_tensor_name, t_vgg_layer3_out_tensor_name, t_vgg_layer4_out_tensor_name, t_vgg_layer7_out_tensor_name\ntests.test_load_vgg(load_vgg, tf)\n\n\ndef layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n \"\"\"\n Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.\n :param vgg_layer3_out: TF Tensor for VGG Layer 3 output\n :param vgg_layer4_out: TF Tensor for VGG Layer 4 output\n :param vgg_layer7_out: TF Tensor for VGG Layer 7 output\n :param num_classes: Number of classes to classify\n :return: The Tensor for the last layer of output\n \"\"\" \n\n # Added to skip ResourceExhaustedError, based on https://medium.com/@subodh.malgonde/transfer-learning-using-tensorflow-52a4f6bcde3e\n vgg_layer7_out = tf.stop_gradient(vgg_layer7_out)\n vgg_layer4_out = tf.stop_gradient(vgg_layer4_out)\n vgg_layer3_out = tf.stop_gradient(vgg_layer3_out)\n \n # Convolutions\n conv7_1x1 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding=padding,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(kernel_regularizer),\n kernel_initializer=tf.truncated_normal_initializer(stddev=kernel_initializer))\n conv4_1x1 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, padding=padding,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(kernel_regularizer),\n kernel_initializer=tf.truncated_normal_initializer(stddev=kernel_initializer))\n conv3_1x1 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, padding=padding,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(kernel_regularizer),\n kernel_initializer=tf.truncated_normal_initializer(stddev=kernel_initializer))\n\n # Transpose, Upsample by 2, by 2 and by 8\n output_1 = tf.layers.conv2d_transpose(conv7_1x1, num_classes, kernel_size, strides, padding=padding,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(kernel_regularizer),\n kernel_initializer=tf.truncated_normal_initializer(stddev=kernel_initializer))\n\n # Skip layers, add them in\n output_1 = tf.add(output_1, conv4_1x1, name='l_add_2')\n output_2 = tf.layers.conv2d_transpose(output_1, num_classes, kernel_size, strides=(2, 2), padding=padding,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(kernel_regularizer),\n kernel_initializer=tf.truncated_normal_initializer(stddev=kernel_initializer))\n \n 
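# note (added): fuse the conv3 1x1 skip connection next, then upsample by 8 back to input resolution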
output_2 = tf.add(output_2, conv3_1x1, name='l_add_3')\n output = tf.layers.conv2d_transpose(output_2, num_classes, kernel_size*4, strides=(8, 8), padding=padding,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(kernel_regularizer),\n kernel_initializer=tf.truncated_normal_initializer(stddev=kernel_initializer))\n\n ## TODO, not sure how to use it\n ## from https://discussions.udacity.com/t/here-is-some-advice-and-clarifications-about-the-semantic-segmentation-project/403100\n #pool3_out_scaled = tf.multiply(pool3_out, 0.0001, name='pool3_out_scaled')\n #pool4_out_scaled = tf.multiply(pool4_out, 0.01, name='pool4_out_scaled')\n\n #debug(output, [tf.shape(output)[1:3]])\n return output\ntests.test_layers(layers)\n\n\ndef optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n \"\"\"\n Build the TensorFLow loss and optimizer operations.\n :param nn_last_layer: TF Tensor of the last layer in the neural network\n :param correct_label: TF Placeholder for the correct label image\n :param learning_rate: TF Placeholder for the learning rate\n :param num_classes: Number of classes to classify\n :return: Tuple of (logits, train_op, cross_entropy_loss)\n \"\"\"\n\n # 2D reshape\n logits = tf.reshape(nn_last_layer, (-1, num_classes))\n labels = tf.reshape(correct_label, (-1, num_classes))\n\n # Classification and Loss\n \n # Loss function\n cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels))\n \n '''\n When adding l2-regularization, setting a regularizer in the arguments of \n the tf.layers is not enough. Regularization loss terms must be manually \n added to your loss function. otherwise regularization is not implemented.\n regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n cross_entropy_loss = tf.add(cross_entropy_loss, sum(regularization_losses))\n '''\n \n # Training operation\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(cross_entropy_loss)\n \n ## Debugging\n #debug(logits, [logits])\n #debug(correct_label, [correct_label])\n\n return logits, train_op, cross_entropy_loss\ntests.test_optimize(optimize)\n\n\ndef train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate):\n \"\"\"\n Train neural network and print out the loss during training.\n :param sess: TF Session\n :param epochs: Number of epochs\n :param batch_size: Batch size\n :param get_batches_fn: Function to get batches of training data. 
Call using get_batches_fn(batch_size)\n :param train_op: TF Operation to train the neural network\n :param cross_entropy_loss: TF Tensor for the amount of loss\n :param input_image: TF Placeholder for input images\n :param correct_label: TF Placeholder for label images\n :param keep_prob: TF Placeholder for dropout keep probability\n :param learning_rate: TF Placeholder for learning rate\n \"\"\" \n \n init_g = tf.global_variables_initializer()\n init_l = tf.local_variables_initializer()\n sess.run(init_g)\n sess.run(init_l)\n for epoch in range(epochs):\n i = 0\n for image, label in get_batches_fn(batch_size): # pair of images and labels\n _, c_entropy_loss = sess.run([train_op,cross_entropy_loss]\n\t\t\t,feed_dict={input_image:image, correct_label:label, keep_prob:k_prob, learning_rate:l_rate})\n i += 1\n if i % 10 == 0:\n print(\"Epoch {} Batch {} Loss {:.3f}\".format(epoch+1, i, c_entropy_loss))\ntests.test_train_nn(train_nn)\n\ndef run():\n #tests.test_for_kitti_dataset(data_dir)\n\n # Download pretrained vgg model\n helper.maybe_download_pretrained_vgg(data_dir)\n\n # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.\n # You'll need a GPU with at least 10 teraFLOPS to train on.\n # https://www.cityscapes-dataset.com/\n\n # Memory parameters https://medium.com/@lisulimowicz/tensorflow-cpus-and-gpus-configuration-9c223436d4ef\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\n with tf.Session(config=config) as sess:\n \n tf.global_variables_initializer()\n tf.local_variables_initializer()\n \n # Placeholders\n labels = tf.placeholder(tf.float32, shape = [None, None, None, num_classes])\n learning_rate = tf.placeholder(tf.float32)\n \n # Path to vgg model\n vgg_path = os.path.join(data_dir, 'vgg')\n # Create function to get batches\n get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)\n\n # OPTIONAL: Augment Images for better results\n # https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network\n\n # Build NN using load_vgg, layers, and optimize function\n input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(sess, vgg_path)\n layer_output = layers(layer3_out, layer4_out, layer7_out, num_classes)\n logits, train_op, cross_entropy_loss = optimize(layer_output, labels, learning_rate, num_classes)\n\n # Train NN using the train_nn function\n train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image, labels, keep_prob, learning_rate)\n \n # Save inference data using helper.save_inference_samples\n helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)\n\n # Apply the trained model to a video\n def complete_pipeline(img):\n \"\"\"\n Sample code taken from gen_test_output helper method\n \"\"\"\n image = scipy.misc.imresize(img, image_shape)\n im_softmax = sess.run([tf.nn.softmax(logits)], {keep_prob: 1.0, input_image: [image]})\n im_softmax = im_softmax[0][:, 1].reshape(image_shape[0], image_shape[1])\n segmentation = (im_softmax > 0.5).reshape(image_shape[0], image_shape[1], 1)\n mask = np.dot(segmentation, np.array([[0, 255, 0, 127]]))\n mask = scipy.misc.toimage(mask, mode=\"RGBA\")\n street_im = scipy.misc.toimage(image)\n street_im.paste(mask, box=None, mask=mask)\n return np.array(street_im)\n \n def video_pipeline(current_image):\n ''' Complete video pipeline '''\n return complete_pipeline(current_image)\n\n def generate_video(output, 
process_image):\n ''' Generate a video '''\n print('Generating video to: {}'.format(output))\n clip1 = VideoFileClip(input_file)#.subclip(35,45) # 0,5 TODO remove this\n video_clip = clip1.fl_image(process_image)\n video_clip.write_videofile(output, audio=False)\n clip1.reader.close()\n clip1.audio.reader.close_proc()\n return output\n\n ###video_output = generate_video(output_file, video_pipeline)\n\nif __name__ == '__main__':\n then = time.time()\n run()\n now = time.time()\n diff = now - then\n minutes, seconds = int(diff // 60), int(diff % 60)\n print('Elapsed time {:d}:{:d} minutes'.format(minutes, seconds))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"245106934","text":"import thulac\nimport os\nstop_words_path = os.path.join(os.path.abspath('.'), 'stopwords.txt')\npath1 = os.path.join(os.path.abspath('.'), 'neg.txt')\npath2 = os.path.join(os.path.abspath('.'), 'pos.txt')\ndef get_stop_words():\n with open(stop_words_path, 'rt', encoding='utf-8') as f:\n return [line.replace('\\n', '') for line in f]\n\ndef get_word2vec_words():\n with open(path1, 'rt', encoding='utf-8') as f:\n raw_list1 = [line.replace('\\n', '') for line in f]\n with open(path2, 'rt', encoding='utf-8') as f:\n raw_list2 = [line.replace('\\n', '') for line in f]\n return raw_list1, raw_list2\n\ndef token(sequence):\n stop_words = get_stop_words()\n thu1 = thulac.thulac(seg_only=True, filt=True)\n #thulac直接返回str,使用split切分为数组,然后去除停用词\n seq = thu1.cut(sequence, text=True).split(' ')\n seq_no_stop_words = [value for value in seq if value not in stop_words]\n return seq_no_stop_words\n\ndef generate_train_data():\n _, raw_text1= get_word2vec_words()\n raw_text1.extend(_)\n # 对每一个句子进行分词后添加到总的语料集中\n with open('news_tokend_datasets', 'at', encoding='utf-8') as f:\n for index, line in enumerate(raw_text1):\n corpus = ' '.join(token(line))\n corpus += '\\n'\n f.write(corpus)\n\n if index %100 == 0: print(index)\n","sub_path":"Sentiment/CommentsToken/GenerateTokenData.py","file_name":"GenerateTokenData.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"338192668","text":"#coding:utf8\n\n# Copyright 2019 longpeng2008. 
All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# If you find any problem,please contact us\n#\n# longpeng2008to2012@gmail.com \n#\n# or create issues\n# =============================================================================\nfrom __future__ import print_function, division\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport os\nfrom net import simpleconv3\n\n## 使用tensorboardX进行可视化\nfrom tensorboardX import SummaryWriter\nwriter = SummaryWriter('logs') ## 创建一个SummaryWriter的示例,默认目录名字为runs\n\n## 训练主函数\ndef train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n for epoch in range(num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n for phase in ['train', 'val']:\n if phase == 'train':\n scheduler.step()\n model.train(True) ## 设置为训练模式\n else:\n model.train(False) ## 设置为验证模式\n\n running_loss = 0.0 ##损失变量\n running_accs = 0.0 ##精度变量\n number_batch = 0 ##\n ## 从dataloaders中获得数据\n for data in dataloaders[phase]:\n inputs, labels = data \n if use_gpu:\n inputs = inputs.cuda()\n labels = labels.cuda()\n\n optimizer.zero_grad() ##清空梯度\n outputs = model(inputs) ##前向运行\n _, preds = torch.max(outputs.data, 1) ##使用max()函数对输出值进行操作,得到预测值索引\n loss = criterion(outputs, labels) ##计算损失\n if phase == 'train':\n loss.backward() ##误差反向传播\n optimizer.step() ##参数更新\n\n running_loss += loss.data.item()\n running_accs += torch.sum(preds == labels).item()\n number_batch += 1\n\n ## 得到每一个epoch的平均损失与精度\n epoch_loss = running_loss / number_batch\n epoch_acc = running_accs / dataset_sizes[phase]\n \n ## 收集精度和损失用于可视化\n if phase == 'train':\n writer.add_scalar('data/trainloss', epoch_loss, epoch)\n writer.add_scalar('data/trainacc', epoch_acc, epoch)\n else:\n writer.add_scalar('data/valloss', epoch_loss, epoch)\n writer.add_scalar('data/valacc', epoch_acc, epoch)\n\n print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n phase, epoch_loss, epoch_acc))\n\n writer.close()\n return model\n\nif __name__ == '__main__':\n\n image_size = 60 ##图像统一缩放大小\n crop_size = 48 ##图像裁剪大小,即训练输入大小\n nclass = 2 ##分类类别数\n model = simpleconv3(nclass) ##创建模型\n data_dir = './data' ##数据目录\n \n ## 模型缓存接口\n if not os.path.exists('models'):\n os.mkdir('models')\n\n ## 检查GPU是否可用,如果是使用GPU,否使用CPU\n use_gpu = torch.cuda.is_available()\n if use_gpu:\n model = model.cuda()\n print(model)\n\n ## 创建数据预处理函数,训练预处理包括随机裁剪缩放、随机翻转、归一化,验证预处理包括中心裁剪,归一化\n data_transforms = {\n 'train': transforms.Compose([\n transforms.RandomSizedCrop(48),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.5,0.5,0.5], [0.5,0.5,0.5])\n ]),\n 'val': transforms.Compose([\n transforms.Scale(64),\n transforms.CenterCrop(48),\n transforms.ToTensor(),\n transforms.Normalize([0.5,0.5,0.5], [0.5,0.5,0.5])\n ]),\n }\n\n ## 使用torchvision的dataset ImageFolder接口读取数据\n image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),\n data_transforms[x]) for x in ['train', 'val']}\n\n ## 创建数据指针,设置batch大小,shuffle,多进程数量\n dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x],\n batch_size=16,\n shuffle=True,\n num_workers=4) for x in ['train', 'val']}\n ## 获得数据集大小\n dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\n\n ## 优化目标使用交叉熵,优化方法使用带动量项的SGD,学习率迭代策略为step,每隔100个epoch,变为原来的0.1倍\n criterion = nn.CrossEntropyLoss()\n optimizer_ft = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)\n exp_lr_scheduler = 
lr_scheduler.StepLR(optimizer_ft, step_size=100, gamma=0.1)\n\n model = train_model(model=model,\n criterion=criterion,\n optimizer=optimizer_ft,\n scheduler=exp_lr_scheduler,\n num_epochs=300)\n\n torch.save(model.state_dict(),'models/model.pt')\n","sub_path":"computer_vision/projects/classification/pytorch/simpleconv3/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"423685459","text":"import numpy as np\n\nfrom desc.backend import put\nfrom desc.basis import FourierZernikeBasis\n\n\ndef get_initial_guess_scale_bdry(axis, bdry, bdry_ratio,\n R_basis:FourierZernikeBasis, Z_basis:FourierZernikeBasis):\n \"\"\"Generate initial guess by scaling boundary shape\n\n Parameters\n ----------\n axis : ndarray, shape(Naxis,3)\n array of axis Fourier coeffs [n,Rcoeff, Zcoeff]\n bdry : ndarray, shape(Nbdry,4)\n array of boundary Fourier coeffs [m,n,Rcoeff, Zcoeff]\n OR\n array of real space coordinates, [theta,phi,R,Z]\n bdry_ratio : float\n fraction in range [0,1] of the full non-axisymmetric boundary to use\n R_basis : FourierZernikeBasis\n DESCRIPTION\n Z_basis : FourierZernikeBasis\n DESCRIPTION\n\n Returns\n -------\n cR : ndarray, shape(N_coeffs,)\n Fourier-Zernike coefficients for R, following indexing given in zern_idx\n cZ : ndarray, shape(N_coeffs,)\n Fourier-Zernike coefficients for Z, following indexing given in zern_idx\n\n \"\"\"\n modes_R = R_basis.modes\n modes_Z = Z_basis.modes\n\n cR = np.zeros((R_basis.num_modes,))\n cZ = np.zeros((Z_basis.num_modes,))\n\n for m, n, bR, bZ in bdry:\n\n bR *= np.clip(bdry_ratio+(n == 0), 0, 1)\n bZ *= np.clip(bdry_ratio+(n == 0), 0, 1)\n\n if m == 0:\n\n idx = np.where(axis[:, 0] == n)\n if idx[0].size == 0:\n aR = bR\n aZ = bZ\n else:\n aR = axis[idx, 1][0, 0]\n aZ = axis[idx, 2][0, 0]\n\n cR = put(cR, np.where(np.logical_and.reduce(\n (modes_R[:, 0] == 0, modes_R[:, 1] == 0, modes_R[:, 2] == n)))[0], (bR+aR)/2)\n cZ = put(cZ, np.where(np.logical_and.reduce(\n (modes_Z[:, 0] == 0, modes_Z[:, 1] == 0, modes_Z[:, 2] == n)))[0], (bZ+aZ)/2)\n cR = put(cR, np.where(np.logical_and.reduce(\n (modes_R[:, 0] == 2, modes_R[:, 1] == 0, modes_R[:, 2] == n)))[0], (bR-aR)/2)\n cZ = put(cZ, np.where(np.logical_and.reduce(\n (modes_Z[:, 0] == 2, modes_Z[:, 1] == 0, modes_Z[:, 2] == n)))[0], (bZ-aZ)/2)\n\n else:\n cR = put(cR, np.where(np.logical_and.reduce((modes_R[:, 0] == np.absolute(\n m), modes_R[:, 1] == m, modes_R[:, 2] == n)))[0], bR)\n cZ = put(cZ, np.where(np.logical_and.reduce((modes_Z[:, 0] == np.absolute(\n m), modes_Z[:, 1] == m, modes_Z[:, 2] == n)))[0], bZ)\n\n return cR, cZ\n","sub_path":"desc/init_guess.py","file_name":"init_guess.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"487111582","text":"from __future__ import print_function\n\nimport paddle\nimport paddle.fluid as fluid\n\n__all__ = ['reshape', 'embedding']\n\ndef reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None):\n \"\"\"\n Gives a new shape to the input Tensor without changing its data.\n\n The target shape can be given by :attr:`shape` or :attr:`actual_shape`.\n :attr:`shape` is a list of integer while :attr:`actual_shape` is a tensor\n variable. 
:attr:`actual_shape` has a higher priority than :attr:`shape`\n if it is provided, while :attr:`shape` still should be set correctly to\n gurantee shape inference in compile-time.\n\n Some tricks exist when specifying the target shape.\n\n 1. -1 means the value of this dimension is inferred from the total element\n number of x and remaining dimensions. Thus one and only one dimension can\n be set -1.\n\n 2. 0 means the actual dimension value is going to be copied from the\n corresponding dimension of x. The indice of 0s in shape can not exceed\n Rank(X).\n\n Here are some examples to explain it.\n\n 1. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape\n is [6, 8], the reshape operator will transform x into a 2-D tensor with\n shape [6, 8] and leaving x's data unchanged.\n\n 2. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape\n specified is [2, 3, -1, 2], the reshape operator will transform x into a\n 4-D tensor with shape [2, 3, 4, 2] and leaving x's data unchanged. In this\n case, one dimension of the target shape is set to -1, the value of this\n dimension is inferred from the total element number of x and remaining\n dimensions.\n\n 3. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape\n is [-1, 0, 3, 2], the reshape operator will transform x into a 4-D tensor\n with shape [2, 4, 3, 2] and leaving x's data unchanged. In this case,\n besides -1, 0 means the actual dimension value is going to be copied from\n the corresponding dimension of x.\n\n Args:\n x(variable): The input tensor.\n shape(list): The new shape. At most one dimension of the new shape can\n be -1.\n actual_shape(variable): An optional input. If provided, reshape\n according to this given shape rather than\n :attr:`shape` specifying shape. That is to\n say :attr:`actual_shape` has a higher priority\n than :attr:`shape`.\n act (str): The non-linear activation to be applied to output variable.\n inplace(bool): If this flag is set true, the output\n shares data with input without copying, otherwise\n a new output tensor is created\n whose data is copied from input x.\n name (str): The name of this layer. It is optional.\n\n Returns:\n Variable: The output tensor.\n\n Raises:\n TypeError: if actual_shape is neither Variable nor None.\n\n Examples:\n .. 
code-block:: python\n\n data = fluid.layers.data(\n name='data', shape=[2, 4, 6], dtype='float32')\n reshaped = fluid.layers.reshape(\n x=data, shape=[-1, 0, 3, 2], act='tanh', inplace=True)\n \"\"\"\n\n if not (isinstance(shape, list) or isinstance(shape, tuple)):\n raise ValueError(\"Input shape must be a python list or tuple.\")\n inputs = {\"X\": x}\n if isinstance(actual_shape, fluid.framework.Variable):\n inputs[\"Shape\"] = actual_shape\n elif actual_shape is not None:\n raise TypeError(\"actual_shape should either be Variable or None\")\n\n # Validate the shape\n unk_dim_idx = -1\n for dim_idx, dim_size in enumerate(shape):\n if dim_size == -1:\n assert unk_dim_idx == -1, (\n \"Only one dimension in shape can be unknown.\")\n unk_dim_idx = dim_idx\n elif dim_size == 0:\n assert dim_idx < len(x.shape), (\n \"The indice of 0s in shape can not exceed Rank(X).\")\n else:\n assert dim_size > 0, (\n \"Each dimension size given in shape must not be negtive \"\n \"except one unknown dimension.\")\n\n helper = fluid.layer_helper.LayerHelper(\"reshape2\", **locals())\n out = x if inplace else helper.create_variable_for_type_inference(dtype=x.dtype)\n x_shape = helper.create_variable_for_type_inference(dtype=x.dtype)\n helper.append_op(\n type=\"reshape2\",\n inputs=inputs,\n attrs={\"shape\": shape},\n outputs={\"Out\": out,\n \"XShape\": x_shape})\n\n return helper.append_activation(out)\n\n\ndef embedding(input,\n size,\n is_sparse=False,\n is_distributed=False,\n padding_idx=None,\n param_attr=None,\n dtype='float32'):\n \"\"\"\n **Embedding Layer**\n\n This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in\n a lookup table. The result of this lookup is the embedding of each ID in the\n :attr:`input`.\n\n All the input variables are passed in as local variables to the LayerHelper\n constructor.\n\n Args:\n input(Variable): The tensor variable containing the IDs.\n size(tuple|list): The shape of the look up table parameter. It should\n have two elements which indicate the size of the dictionary of\n embeddings and the size of each embedding vector respectively.\n is_sparse(bool): The flag indicating whether to use sparse update.\n is_distributed(bool): Whether to run lookup table from remote parameter server.\n padding_idx(int|long|None): If :attr:`None`, it makes no effect to lookup.\n Otherwise the given :attr:`padding_idx` indicates padding the output\n with zeros whenever lookup encounters it in :attr:`input`. If\n :math:`padding_idx < 0`, the :attr:`padding_idx` to use in lookup is\n :math:`size[0] + dim`.\n param_attr(ParamAttr): Parameters for this layer\n dtype(np.dtype|core.VarDesc.VarType|str): The type of data : float32, float_16, int etc\n\n Returns:\n Variable: The tensor variable storing the embeddings of the \\\n supplied inputs.\n\n Examples:\n .. 
code-block:: python\n\n dict_size = len(dataset.ids)\n data = fluid.layers.data(name='ids', shape=[32, 32], dtype='float32')\n fc = fluid.layers.embedding(input=data, size=[dict_size, 16])\n \"\"\"\n\n helper = fluid.layer_helper.LayerHelper('embedding', **locals())\n w = helper.create_parameter(\n attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False)\n tmp = helper.create_variable_for_type_inference(dtype)\n padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (\n size[0] + padding_idx)\n helper.append_op(\n type='lookup_table',\n inputs={'Ids': input,\n 'W': w},\n outputs={'Out': tmp},\n attrs={\n 'grad_inplace': True,\n 'is_sparse': is_sparse,\n 'is_distributed': is_distributed,\n 'padding_idx': padding_idx\n })\n return tmp\n\n\ndef fused_embedding_seq_pool(input,\n size,\n is_sparse=False,\n is_distributed=False,\n padding_idx=None,\n param_attr=None,\n dtype='float32'):\n \"\"\"\n **Embedding Layer**\n\n This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in\n a lookup table. The result of this lookup is the embedding of each ID in the\n :attr:`input`.\n\n All the input variables are passed in as local variables to the LayerHelper\n constructor.\n\n Args:\n input(Variable): The tensor variable containing the IDs.\n size(tuple|list): The shape of the look up table parameter. It should\n have two elements which indicate the size of the dictionary of\n embeddings and the size of each embedding vector respectively.\n is_sparse(bool): The flag indicating whether to use sparse update.\n is_distributed(bool): Whether to run lookup table from remote parameter server.\n padding_idx(int|long|None): If :attr:`None`, it makes no effect to lookup.\n Otherwise the given :attr:`padding_idx` indicates padding the output\n with zeros whenever lookup encounters it in :attr:`input`. If\n :math:`padding_idx < 0`, the :attr:`padding_idx` to use in lookup is\n :math:`size[0] + dim`.\n param_attr(ParamAttr): Parameters for this layer\n dtype(np.dtype|core.VarDesc.VarType|str): The type of data : float32, float_16, int etc\n\n Returns:\n Variable: The tensor variable storing the embeddings of the \\\n supplied inputs.\n\n Examples:\n .. 
code-block:: python\n\n dict_size = len(dataset.ids)\n data = fluid.layers.data(name='ids', shape=[32, 32], dtype='float32')\n fc = fluid.layers.embedding(input=data, size=[dict_size, 16])\n \"\"\"\n\n helper = fluid.layer_helper.LayerHelper('fused_embedding_seq_pool', **locals())\n w = helper.create_parameter(\n attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False)\n tmp = helper.create_variable_for_type_inference(dtype)\n padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (\n size[0] + padding_idx)\n helper.append_op(\n type='fused_embedding_seq_pool',\n inputs={'Ids': input,\n 'W': w},\n outputs={'Out': tmp},\n attrs={\n 'is_sparse': is_sparse,\n 'combiner': 'sum'\n })\n return tmp\n\n\ndef fused_hash_embedding_seq_pool(x,\n size,\n hash_size,\n num_hash,\n is_sparse=False,\n is_distributed=False,\n padding_idx=None,\n param_attr=None,\n dtype='float32'):\n \"\"\"\n **Fused Hash, Embedding and Sequence Pool Op Layer**\n \"\"\"\n\n helper = fluid.layer_helper.LayerHelper('fused_hash_embedding_seq_pool', **locals())\n w = helper.create_parameter(\n attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False)\n tmp = helper.create_variable_for_type_inference(dtype)\n helper.append_op(\n type='fused_hash_embedding_seq_pool',\n inputs={'X': x,\n 'W': w},\n outputs={'Out': tmp},\n attrs={\n 'num_hash': num_hash,\n 'mod_by': hash_size,\n 'is_sparse': is_sparse,\n 'combiner': 'sum'\n })\n return tmp\n","sub_path":"PyramidDNN/fluid/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":10626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"219516180","text":"from .mnist_model import MnistModel\nimport cntk\nfrom cntk.layers import Convolution2D, Activation, MaxPooling, Dense, Dropout, default_options, Sequential\nfrom cntk.initializer import normal\nfrom cntk.ops import relu, minus, constant\n\n\nclass AlexNet(MnistModel):\n\n def __init__(self, input, output):\n super().__init__(input, output)\n self.create_model()\n\n def __local_response_normalization(self, k, n, alpha, beta, name=''):\n x = cntk.placeholder(name='lrn_arg')\n x2 = cntk.square(x)\n x2s = cntk.reshape(x2, (1, cntk.InferredDimension), 0, 1)\n W = cntk.constant(alpha / (2 * n + 1), (1, 2 * n + 1, 1, 1), name='W')\n y = cntk.convolution(W, x2s)\n b = cntk.reshape(y, cntk.InferredDimension, 0, 2)\n den = cntk.exp(beta * cntk.log(k + b))\n apply_x = cntk.element_divide(x, den)\n return apply_x\n\n def create_model(self):\n\n mean_removed_features = minus(\n self.input, constant(114), name='mean_removed_input')\n\n with default_options(activation=None, pad=True, bias=True):\n self.model = Sequential([\n Convolution2D((11, 11), 96, init=normal(0.01),\n pad=False, name='conv1'),\n Activation(activation=relu, name='relu1'),\n self.__local_response_normalization(\n 1.0, 2, 0.0001, 0.75, name='norm1'),\n MaxPooling((3, 3), (2, 2), name='pool1'),\n Convolution2D((5, 5), 192, init=normal(\n 0.01), init_bias=0.1, name='conv2'),\n Activation(activation=relu, name='relu2'),\n self.__local_response_normalization(\n 1.0, 2, 0.0001, 0.75, name='norm2'),\n MaxPooling((3, 3), (2, 2), name='pool2'),\n Convolution2D((3, 3), 384, init=normal(0.01), name='conv3'),\n Activation(activation=relu, name='relu3'),\n Convolution2D((3, 3), 384, init=normal(\n 0.01), init_bias=0.1, name='conv4'),\n Activation(activation=relu, name='relu4'),\n Convolution2D((3, 3), 256, init=normal(\n 0.01), init_bias=0.1, name='conv5'),\n 
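# note (added): conv5 keeps 256 feature maps; relu5 and a 3x3 stride-2 max-pool follow before the FC stack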
Activation(activation=relu, name='relu5'),\n MaxPooling((3, 3), (2, 2), name='pool5'),\n Dense(4096, init=normal(0.005), init_bias=0.1, name='fc6'),\n Activation(activation=relu, name='relu6'),\n Dropout(0.5, name='drop6'),\n Dense(4096, init=normal(0.005), init_bias=0.1, name='fc7'),\n Activation(activation=relu, name='relu7'),\n Dropout(0.5, name='drop7'),\n Dense(self.number_labels, init=normal(0.01), name='fc8')\n ])(mean_removed_features)\n","sub_path":"models/alex_net.py","file_name":"alex_net.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"389390882","text":"from __future__ import unicode_literals \n\n# Django settings for lims project\n\nimport sys\nimport os\n\ntry:\n from app_data import APP_PUBLIC_DATA\nexcept ImportError:\n print >>sys.stderr, '''app_data.py file not found.'''\n\nPROJECT_ROOT = os.path.normpath(os.path.join(\n os.path.dirname(os.path.abspath(__file__)),'..'))\n\nDEBUG = True\n\nADMINS = (\n ('Site Admin', 'site_admin@email.com'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'sqlite3', \n 'NAME': os.path.join(PROJECT_ROOT, 'project.db'), \n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n}\n\n\n# Hosts/domain names that are valid for this site.\n# INSTALL TODO: required if DEBUG is False\n# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts\nALLOWED_HOSTS = []\n\n# NOTE: Django internationalization is not being used\nUSE_I18N = False\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\n# NOTE: Django internationalization is not being used; so this has no effect.\nLANGUAGE_CODE = 'en-us'\n# Django format localization is not being used\nUSE_L10N = False\n\n\n# Local time zone for this installation. 
Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# INSTALL TODO: set to the local time zone for the installation\nTIME_ZONE = 'UTC'\n# Use timezone aware datetime information when handling datetimes in Django:\n# NOTE: postgresql (stores) values internally as UTC, and the connection \n# converts using the TIME_ZONE setting .\nUSE_TZ = True\n\n# NOTE: /accounts/login is the default\nLOGIN_URL = '/accounts/login/'\n# Default if \"next\" is not given as a request param\nLOGIN_REDIRECT_URL='/lims/'\nLOGOUT_REDIRECT_URL='/lims/'\n\n# Timeout, in seconds; will cause user logout: 8 hours\n# NOTE: this will not log the browser out until a request is made.\nSESSION_COOKIE_AGE = 60*60*8\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n# NOTE: SSL may only be enforced on the production server\n# SECURE_SSL_REDIRECT = True\n# SESSION_COOKIE_SECURE = True\n# CSRF_COOKIE_SECURE = True\n\nSTATIC_ROOT = ''\nSTATIC_URL = '/_static/'\nSTATICFILES_DIRS = ()\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'tell_no_1'\nSITE_ID = 1\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n \"lims.webpack_bundle_hash_name_processor.bundle_context_processor\", \n ],\n },\n },\n]\n\n\nMIDDLEWARE = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'lims.urls'\n\nWSGI_APPLICATION = 'lims.wsgi.application'\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.admin',\n 'django.contrib.admindocs',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n# 'aldjemy',\n 'reports',\n 'lims',\n 'db',\n)\n\n# Turn off migrations during testing (just make the database from models.py)\nSOUTH_TESTS_MIGRATE = False\n\n# ICCBL-Setting: Directory for temp files created on download\nTEMP_FILE_DIR='/tmp'\n\n# Base path for profiling\nPROFILE_LOG_BASE='/tmp'\n\n# ICCBL-Setting: structure image cache directory if available. 
\n# @see db.views for details\nWELL_STRUCTURE_IMAGE_DIR=''\n\n# ICCBL-Setting: Maximum rows to cache in the database table \"well_query_index\"\n# @see db.api.ScreenResultResource\nMAX_WELL_INDEXES_TO_CACHE=3e+08\n\n# ICCBL-Setting: Maximum rows to cache per query for cached_resultproxy:\n# @see reports.sqlalchemy_resource\nMAX_ROWS_FOR_CACHE_RESULTPROXY=1e4\n\n# ICCBL-Setting: Minimum wells for insertion into the well_query_index before \n# clearing older indexes; for performance tuning on screen result / well queries.\n# @see db.api.ScreenResultResource\nMIN_WELLS_TO_CLEAR_INDEXES = 3e5\n\n# If not True, then only staff may log in to the system\n# see reports/auth.py\nIS_PRODUCTION_READY = False\n\n# NOTE: SSL may only be enforced on the production server\n# NOTE: do not use with the migration app, as it uses insecure HTTP to initialize, \n# TODO: enable these settings when in production\nif IS_PRODUCTION_READY is True:\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n# 20191206: TODO: verify with Jen\nRESTRICT_ALL_SEQUENCES=True\n\n\n# ICCBL-Setting: For use when authenticating\n# @see reports.api_base\nBASIC_AUTH_REALM='screensaver'\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': 'unique-snowflake', \n },\n 'reports_cache': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': 'reports_cache'\n },\n 'resource_cache': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': 'resource_cache'\n },\n 'db_cache': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': 'db_cache'\n },\n 'screen_cache': {\n 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',\n 'LOCATION': 'screen_cache'\n },\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['mail_admins'],\n 'level': 'WARN',\n 'propagate': True,\n },\n }\n}\n\n","sub_path":"lims/base_settings.py","file_name":"base_settings.py","file_ext":"py","file_size_in_byte":6705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"271381014","text":"# Traverse a graph \n# Pick a node. 
Visit its adjacent, unvisited vertices, mark them as visited and add them to the Queue\n# If there are no adjacent vertices left, remove first node from the queue and repeat previous step\n# continue doing this till the Queue is empty\n\n# define a graph\ngraph = {\n\t'A': ['B', 'C'],\n\t'B': ['D', 'E'],\n\t'C': ['F'],\n\t'D': [],\n\t'E': ['F'],\n\t'F': []\n}\n\n# create a Queue and a list to keep track of Visited vertices\nqueue = []\nvisited = []\n\n# define a function to implement BFS traversal\ndef bfs_traversal(visited, graph, node):\n\t# visit the source node and add it to the queue\n\tvisited.append(node)\n\tqueue.append(node)\n\n\t# explore unvisited neighbors of nodes in Queue till it becomes empty\n\twhile queue:\n\t\ts = queue.pop(0)\n\t\tprint(s, end = \" \")\n\n\t\tfor neighbor in graph[s]:\n\t\t\tif neighbor not in visited:\n\t\t\t\tvisited.append(neighbor)\n\t\t\t\tqueue.append(neighbor)\n\nbfs_traversal(visited, graph, 'A')\n\n","sub_path":"breadthFirstSearch.py","file_name":"breadthFirstSearch.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"180786251","text":"import numpy as np\nfrom sigmoide import sigmoide\n\n\ndef costFunctionReg(theta, X, y, alpha):\n\n    theta = np.matrix(theta)\n    X = np.matrix(X)\n    y = np.matrix(y)\n\n    grad0 = np.multiply(-y, np.log(sigmoide(X * theta.T)))\n    grad1 = np.multiply((1 - y), np.log(1 - sigmoide(X * theta.T)))\n    # regularization term: alpha/(2m) * sum(theta_j^2), excluding theta_0;\n    # the original parenthesization (alpha / 2 * len(X)) multiplied by m instead of dividing\n    reg = (alpha / (2 * len(X))) * np.sum(np.asarray(\n        theta[:, 1:theta.shape[1]]) ** 2)\n\n    return np.sum(grad0 - grad1) / (len(X)) + reg\n","sub_path":"mestrado/disciplinas/machine-learning/trabalhos/T1/Arquivos Python/costFunctionReg.py","file_name":"costFunctionReg.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"95826976","text":"# classifier/application/routes.py\nfrom flask import Flask\nfrom flask import request, jsonify\nfrom application import app\nfrom spam_classifier import classify\n\n'''The run.py script sits at the top level and contains just one line, which\nimports the application instance: from application import app\n'''\n\n@app.route('/classify_text', methods=['POST'])\ndef classify_text():\n    data = request.json\n    text = data.get('text')\n    #The .get() method returns None if the requested key is missing\n    if text is None:\n        params = ', '.join(data.keys())\n        #Convert all received parameters into a single string\n        return jsonify({'message': f'Parameter \"{params}\" is invalid'}), 400\n        #Earlier we did not set the HTTP status code explicitly,\n        #but Flask was actually doing that work for us.\n        #200 is returned by default\n    else:\n        result = classify(text)\n        return jsonify({'result': result})\n\n@app.route('/number_inc', methods=['GET'])\ndef number_inc():\n\targs = request.args\n\ttry:\n\t\tnum = int(args['num']) + 1\n\t\treturn str(num)\n\texcept (KeyError, ValueError):\n\t\treturn 'You must enter a number'\n\n@app.route('/')\ndef hello_world():\n    return 'Hello, World!' 
#возвращает приветствие в виде строки.\n\n@app.route('/hello_user', methods=['POST'])\ndef hello_user():\n data = request.json\n user = data['user']\n return f'hello {user}'\n","sub_path":"application/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"650870878","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread('C:\\\\Users\\\\Jimit\\\\Desktop\\\\Project\\\\Original Images\\\\corners.jpg')\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nedges = cv2.Canny(gray,100, 250)\n\n# get image height, width\n(h, w) = img.shape[:2]\ncenter = (w / 2, h / 2)\n\n\nlines = cv2.HoughLinesP(edges,1,np.pi/90, 20, minLineLength = 50, maxLineGap = 200)\n\n#print (lines)\nfor line in lines:\n x1, y1, x2, y2 = line[0]\n cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)\n print (line)\n\n\nfor theta in lines:\n a = np.cos(theta)\n b = np.sin(theta)\n\n a = np.rad2deg(a)\n b = np.rad2deg(b)\n\n print(a)\n print(b)\n\n\n\nM = cv2.getRotationMatrix2D(center, 270, 1)\n\nrotated = cv2.warpAffine(img, M, (w, h))\ncv2.imshow('blah', rotated)\ncv2.waitKey(0)\ncv2.imshow('lmao', img)\n\ncv2.imwrite('houghlines3.jpg',img)\n","sub_path":"Program/hough.py","file_name":"hough.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"590832730","text":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Train the model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport tensorflow as tf\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.flags.DEFINE_string(\"train_dir\", \"\",\n \"Directory for saving and loading model checkpoints.\")\ntf.flags.DEFINE_string(\"input_file_pattern\", \"\",\n \"File pattern of sharded TFRecord input files.\")\n\ntf.flags.DEFINE_string(\"inception_checkpoint_file\", \"\",\n \"Path to a pretrained inception_v3 model.\")\ntf.flags.DEFINE_boolean(\"train_inception\", False,\n \"Whether to train inception submodel variables.\")\ntf.flags.DEFINE_boolean(\"train_inception_with_decay\", False,\n \"Whether to train inception submodel variables with decay.\")\n\ntf.flags.DEFINE_integer(\"number_of_steps\", 1000000, \"Number of training steps.\")\ntf.flags.DEFINE_integer(\"log_every_n_steps\", 10,\n \"Frequency at which loss and global step are logged.\")\n\n\n# training config\ntf.flags.DEFINE_integer(\"batch_size\", 200,\n \"Batch size.\")\ntf.flags.DEFINE_integer(\"sample_size\", 20,\n \"Actual batch size.\")\ntf.flags.DEFINE_integer(\"num_examples_per_epoch\", 225000*20,\n \"Number of examples per epoch of training data.\")\ntf.flags.DEFINE_string(\"optimizer\", \"SGD\",\n \"Optimizer for training the model.\")\ntf.flags.DEFINE_float(\"initial_learning_rate\", 
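# Editor's sketch relating to the hough.py record above: cv2.HoughLinesP returns
# segment endpoints [[x1, y1, x2, y2]], not angles, so taking np.cos/np.sin of
# those raw pixel coordinates does not yield a line orientation. The usual way to
# get each segment's angle in degrees, assuming `lines` came from cv2.HoughLinesP:
import numpy as np

def segment_angles(lines):
    angles = []
    for line in lines:
        x1, y1, x2, y2 = line[0]
        angles.append(np.degrees(np.arctan2(y2 - y1, x2 - x1)))
    return angles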
2.0,\n \"Learning rate for the initial phase of training.\")\ntf.flags.DEFINE_float(\"learning_rate_decay_factor\", 0.5,\n \"Scale learning rate by this factor every num_epochs_per_decay epochs.\")\ntf.flags.DEFINE_float(\"num_epochs_per_decay\", 8.0,\n \"Scale learning rate by learning_rate_decay_factor every this many epochs.\")\ntf.flags.DEFINE_float(\"train_inception_learning_rate\", 0.0005,\n \"Learning rate when fine tuning the Inception v3 parameters.\")\ntf.flags.DEFINE_float(\"clip_gradients\", 5.0,\n \"If not None, clip gradients to this value.\")\ntf.flags.DEFINE_integer(\"max_checkpoints_to_keep\", 5,\n \"Maximum number of recent checkpoints to preserve.\")\ntf.flags.DEFINE_float(\"keep_checkpoint_every_n_hours\", 0.25,\n \"Keep a checkpoint every this many hours.\")\ntf.flags.DEFINE_integer(\"save_interval_secs\", 600,\n \"Save a checkpoint every this many secs.\")\ntf.flags.DEFINE_string(\"exclude_variable_patterns\", None,\n \"Filter (by comma separated regular expressions) variables that will not be\"\n \" loaded from and saved to checkpoints.\")\n\nimport ranker_model\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ndef main(unused_argv):\n assert FLAGS.input_file_pattern, \"--input_file_pattern is required\"\n assert FLAGS.train_dir, \"--train_dir is required\"\n\n # Create training directory.\n train_dir = FLAGS.train_dir\n if not tf.gfile.IsDirectory(train_dir):\n tf.logging.info(\"Creating training directory: %s\", train_dir)\n tf.gfile.MakeDirs(train_dir)\n\n # Build the TensorFlow graph.\n g = tf.Graph()\n with g.as_default():\n # Build the model.\n model = ranker_model.RankerModel(mode=\"train\")\n model.build()\n\n # Set up the learning rate.\n learning_rate_decay_fn = None\n if FLAGS.train_inception and not FLAGS.train_inception_with_decay:\n learning_rate = tf.constant(FLAGS.train_inception_learning_rate)\n else:\n learning_rate = tf.constant(FLAGS.initial_learning_rate)\n if FLAGS.learning_rate_decay_factor > 0:\n num_batches_per_epoch = (FLAGS.num_examples_per_epoch /\n FLAGS.batch_size)\n decay_steps = int(num_batches_per_epoch *\n FLAGS.num_epochs_per_decay)\n\n def _learning_rate_decay_fn(learning_rate, global_step):\n return tf.train.exponential_decay(\n learning_rate,\n global_step,\n decay_steps=decay_steps,\n decay_rate=FLAGS.learning_rate_decay_factor,\n staircase=True)\n\n learning_rate_decay_fn = _learning_rate_decay_fn\n\n # Set up the training ops.\n train_op = tf.contrib.layers.optimize_loss(\n loss=model.total_loss,\n global_step=model.global_step,\n learning_rate=learning_rate,\n optimizer=FLAGS.optimizer,\n clip_gradients=FLAGS.clip_gradients,\n learning_rate_decay_fn=learning_rate_decay_fn)\n\n local_init_op = tf.contrib.slim.learning._USE_DEFAULT\n\n if FLAGS.exclude_variable_patterns is not None:\n exclude_variables = []\n exclude_variable_names = []\n\n exclude_variable_patterns = map(lambda x: re.compile(x), FLAGS.exclude_variable_patterns.strip().split(\",\"))\n all_variables = tf.contrib.slim.get_variables()\n\n for var in all_variables:\n for pattern in exclude_variable_patterns:\n if pattern.match(var.name):\n exclude_variables.append(var)\n exclude_variable_names.append(var.name)\n print(\"variables to exclude:\", var.name)\n break\n print(\"%d variables to exclude.\" % len(exclude_variable_names))\n\n if exclude_variables:\n local_init_op = tf.variables_initializer(exclude_variables)\n\n variables_to_restore = tf.contrib.slim.get_variables_to_restore(exclude=exclude_variable_names)\n\n # Set up the Saver for saving and restoring 
model checkpoints.\n saver = tf.train.Saver(variables_to_restore,\n max_to_keep=FLAGS.max_checkpoints_to_keep,\n keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours)\n else:\n # Set up the Saver for saving and restoring model checkpoints.\n saver = tf.train.Saver(max_to_keep=FLAGS.max_checkpoints_to_keep,\n keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours)\n\n # Run training.\n tf.contrib.slim.learning.train(\n train_op = train_op,\n logdir = train_dir,\n log_every_n_steps = FLAGS.log_every_n_steps,\n graph = g,\n global_step = model.global_step,\n number_of_steps = FLAGS.number_of_steps,\n local_init_op = local_init_op,\n init_fn = model.init_fn,\n save_summaries_secs = 300,\n save_interval_secs = FLAGS.save_interval_secs,\n saver = saver)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"ranker/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"169849128","text":"from django import forms\nfrom customer.models import Customer\n\n\nclass CustomerForm(forms.ModelForm):\n \"\"\"CustomerForm definition.\"\"\"\n class Meta:\n model = Customer\n fields = ('customer_name', 'customer_phone', 'address', 'shop')\n widgets = {'customer_name': forms.TextInput(attrs={'type': \"text\", 'placeholder': \"Customer Name\", 'class': 'form-control customer_name', 'name': \"customer_name\"}), 'customer_phone': forms.TextInput(\n attrs={'type': \"text\", 'placeholder': \"Customer Phone\", 'class': 'form-control customer_phone', 'name': \"customer_phone\", }), 'address': forms.Textarea(\n attrs={'rows': \"2\", 'placeholder': \"Address (Optional)\", 'class': 'form-control adress', 'name': \"adress\", }), 'shop': forms.TextInput(\n attrs={'type': \"text\", 'placeholder': \"Shop Name\", 'class': 'form-control shop', 'name': \"shop\", }), }\n","sub_path":"customer/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"564440998","text":"def findPrimeFactors(num=int):\n left = num + 0\n factors = []\n for i in range(2, num):\n if left != 1:\n if left % i == 0:\n while left % i == 0:\n factors.append(i)\n left /= i\n else:\n break\n return factors\n\n\nprint(findPrimeFactors(126))\n","sub_path":"UsefulAlgorithms/FindPrimeFactors.py","file_name":"FindPrimeFactors.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"430078692","text":"from django.urls import path\nfrom . 
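# Editor's sketch of the learning-rate schedule configured in the ranker/train.py
# record above: with staircase=True, tf.train.exponential_decay computes
#   lr(step) = initial_lr * decay_rate ** floor(step / decay_steps).
# A plain-Python equivalent (defaults taken from that record's flags) for
# sanity-checking the flag values:
def staircase_lr(step, initial_lr=2.0, decay_rate=0.5,
                 num_examples=225000 * 20, batch_size=200, epochs_per_decay=8.0):
    decay_steps = int((num_examples / batch_size) * epochs_per_decay)
    return initial_lr * decay_rate ** (step // decay_steps)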
import views\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nurlpatterns = [\n path('', views.accueil, name='accueil'),\n path('article/-', views.lire, name='lire'),\n path('contact/', views.contact, name='contact'),\n path('formulaire_article/', views.article, name='article'),\n path('nouveau_contact/', views.nouveau_contact, name='nouveau_contact'),\n path('voir_contacts/', views.voir_contacts, name='nouveau_contact'),\n path('connexion', views.connexion, name='connexion'),\n path('deconnexion', views.deconnexion, name='deconnexion'),\n path('inter', views.test_i18n, name='inter'),\n]\n\n\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"576128676","text":"# -*- coding: utf-8 -*-\n##########################################################################\n# NSAp - Copyright (C) CEA, 2020\n# Distributed under the terms of the CeCILL-B license, as published by\n# the CEA-CNRS-INRIA. Refer to the LICENSE file or to\n# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html\n# for details.\n##########################################################################\n\n\"\"\"\nThe Variational U-Net auto-encoder.\n\"\"\"\n\n\n# Imports\nimport logging\nimport collections\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as func\nfrom pynet.interfaces import DeepLearningDecorator\nfrom pynet.utils import Networks\nimport numpy as np\nfrom .base import BaseVAE\nfrom ..unet import Down, Up, Conv1x1x1\n\n\n# Global parameters\nlogger = logging.getLogger(\"pynet\")\n\n\n@Networks.register\n@DeepLearningDecorator(family=(\"encoder\", \"vae\"))\nclass VUNet(BaseVAE):\n \"\"\" VUNet.\n\n The Variational U-Net is a convolutional encoder-decoder neural network.\n The convolutional encoding/decoding parts are the same as the UNet.\n\n The model is composed of two sub-networks:\n\n 1. Given x (image), encode it into a distribution over the latent space -\n referred to as Q(z|x).\n 2. Given z in latent space (code representation of an image), decode it\n into the image it represents - referred to as f(z).\n \"\"\"\n\n def __init__(self, latent_dim, in_channels=1, depth=5,\n start_filts=64, up_mode=\"transpose\",\n batchnorm=True, dim=\"3d\", input_shape=None,\n num_classes=None):\n \"\"\" Init class.\n\n Parameters\n ----------\n latent_dim: int\n the latent dimension.\n in_channels: int, default 1\n number of channels in the input tensor.\n depth: int, default 5\n number of layers in the U-Net.\n start_filts: int, default 64\n number of convolutional filters for the first conv.\n up_mode: string, default 'transpose'\n type of upconvolution. Choices: 'transpose' for transpose\n convolution, 'upsample' for nearest neighbour upsampling.\n batchnorm: bool, default False\n normalize the inputs of the activation function.\n dim: str, default '3d'\n '3d' or '2d' input data.\n input_shape: uplet\n the tensor data shape (X, Y, Z) used during upsample (by default\n use a scale factor of 2).\n num_classes: int, default None\n the number of classes for the conditioning.\n \"\"\"\n # Inheritance\n nn.Module.__init__(self)\n\n # Check inputs\n if dim in (\"2d\", \"3d\"):\n self.dim = dim\n else:\n raise ValueError(\n \"'{}' is not a valid mode for merging up and down paths. 
Only \"\n \"'3d' and '2d' are allowed.\".format(dim))\n if up_mode in (\"transpose\", \"upsample\"):\n self.up_mode = up_mode\n else:\n raise ValueError(\n \"'{}' is not a valid mode for upsampling. Only 'transpose' \"\n \"and 'upsample' are allowed.\".format(up_mode))\n\n # Declare class parameters\n self.latent_dim = latent_dim\n self.num_classes = num_classes\n self.in_channels = in_channels\n self.start_filts = start_filts\n self.depth = depth\n self.down = []\n self.up = []\n self.shapes = None\n if input_shape is not None:\n self.shapes = self.downsample_shape(\n input_shape, nb_iterations=(depth - 1))\n self.shapes = self.shapes[::-1]\n\n # Create the encoder pathway\n self.hidden_dims = []\n for cnt in range(depth):\n in_channels = self.in_channels if cnt == 0 else out_channels\n out_channels = self.start_filts * (2**cnt)\n self.hidden_dims.append(out_channels)\n pooling = False if cnt == 0 else True\n self.down.append(\n Down(in_channels, out_channels, self.dim, pooling=pooling,\n batchnorm=batchnorm))\n\n # Create the decoder pathway\n # - careful! decoding only requires depth-1 blocks\n for cnt in range(depth - 1):\n in_channels = out_channels\n out_channels = in_channels // 2\n shape = None\n if self.shapes is not None:\n shape = self.shapes[cnt + 1]\n self.up.append(\n Up(in_channels, out_channels, up_mode=up_mode, dim=self.dim,\n merge_mode=\"none\", batchnorm=batchnorm, shape=shape))\n\n # Add the list of modules to current module\n self.down = nn.Sequential(*self.down)\n hidden_dim = self.hidden_dims[-1] * np.prod(self.shapes[0])\n self.mu = nn.Linear(hidden_dim, latent_dim)\n self.var = nn.Linear(hidden_dim, latent_dim)\n self.latent_to_hidden = nn.Linear(latent_dim, hidden_dim)\n self.up = nn.Sequential(*self.up)\n self.conv_final = Conv1x1x1(out_channels, self.in_channels, self.dim)\n self.logit = nn.Tanh()\n\n # Kernel initializer\n self.kernel_initializer()\n\n def encode(self, x):\n \"\"\" Encodes the input by passing through the encoder network\n and returns the latent codes.\n\n Parameters\n ----------\n x: Tensor, (N, C, X, Y, Z)\n input tensor to encode.\n\n Returns\n -------\n mu: Tensor (N, D)\n mean of the latent Gaussian.\n logvar: Tensor (N, D)\n standard deviation of the latent Gaussian.\n \"\"\"\n logger.debug(\"Encode...\")\n self.debug(\"input\", x)\n x = self.down(x)\n self.debug(\"down\", x)\n x = torch.flatten(x, start_dim=1)\n self.debug(\"flatten\", x)\n # Split x into mu and var components of the latent Gaussian\n # distribution\n z_mu = self.mu(x)\n z_logvar = self.var(x)\n self.debug(\"z_mu\", z_mu)\n self.debug(\"z_logvar\", z_logvar)\n return z_mu, z_logvar\n\n def decode(self, x_sample):\n \"\"\" Maps the given latent codes onto the image space.\n\n Parameters\n ----------\n x_sample: Tensor (N, D)\n sample from the distribution having latent parameters mu, var.\n\n Returns\n -------\n x: Tensor, (N, C, X, Y, Z)\n the prediction.\n \"\"\"\n logger.debug(\"Decode...\")\n self.debug(\"x sample\", x_sample)\n x = self.latent_to_hidden(x_sample)\n self.debug(\"hidden\", x)\n x = x.view(-1, self.hidden_dims[-1], *self.shapes[0])\n self.debug(\"view\", x)\n x = self.up(x)\n self.debug(\"up\", x)\n x = self.conv_final(x)\n self.debug(\"final\", x)\n return self.logit(x)\n\n def reparameterize(self, z_mu, z_logvar):\n \"\"\" Reparameterization trick to sample from N(mu, var) from\n N(0,1).\n\n Parameters\n ----------\n mu: Tensor (N, D)\n mean of the latent Gaussian.\n logvar: Tensor (N, D)\n standard deviation of the latent Gaussian.\n\n Returns\n 
-------\n x_sample: Tensor (N, D)\n sample from the distribution having latent parameters mu, var.\n \"\"\"\n logger.debug(\"Reparameterize...\")\n self.debug(\"z_mu\", z_mu)\n self.debug(\"z_logvar\", z_logvar)\n std = torch.exp(0.5 * z_logvar)\n eps = torch.randn_like(std)\n x_sample = eps.mul(std).add_(z_mu)\n self.debug(\"x sample\", x_sample)\n return x_sample\n\n def forward(self, x):\n logger.debug(\"VUnet...\")\n z_mu, z_logvar = self.encode(x)\n x_sample = self.reparameterize(z_mu, z_logvar)\n predicted = self.decode(x_sample)\n return predicted, {\"z_mu\": z_mu, \"z_logvar\": z_logvar}\n\n\nclass DecodeLoss(object):\n \"\"\" VAE consists of two loss functions:\n\n 1. Reconstruction loss: how well we can reconstruct the image\n 2. KL divergence loss: how off the distribution over the latent space is\n from the prior. Given the prior is a standard Gaussian and the inferred\n distribution is a Gaussian with a diagonal covariance matrix,\n the KL-divergence becomes analytically solvable.\n\n loss = REC_loss + k1 * KL_loss.\n \"\"\"\n def __init__(self, k1=1, rec_loss=\"mse\", nodecoding=False):\n super(DecodeLoss, self).__init__()\n if rec_loss not in (\"mse\", \"bce\"):\n raise ValueError(\"Requested loss not yet supported.\")\n self.layer_outputs = None\n self.k1 = k1\n self.rec_loss = rec_loss\n self.nodecoding = nodecoding\n\n def __call__(self, x_sample, x):\n if self.nodecoding:\n return -1\n if self.layer_outputs is None:\n raise ValueError(\"The model needs to return the latent space \"\n \"distribution parameters z_mu, z_logvar.\")\n z_mu = self.layer_outputs[\"z_mu\"]\n z_logvar = self.layer_outputs[\"z_logvar\"]\n if self.rec_loss == \"bce\":\n recon_loss = func.binary_cross_entropy(\n x_sample, x, reduction=\"sum\")\n else:\n recon_loss = func.mse_loss(\n x_sample, x, reduction=\"mean\")\n # kld_loss = 0.5 * torch.sum(\n # torch.exp(z_logvar) + z_mu**2 - 1.0 - z_logvar)\n kld_loss = torch.mean(-0.5 * torch.sum(\n 1 + z_logvar - z_mu ** 2 - z_logvar.exp(), dim=-1), dim=0)\n\n return recon_loss + self.k1 * kld_loss\n","sub_path":"pynet/models/vae/vunet.py","file_name":"vunet.py","file_ext":"py","file_size_in_byte":9474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"489264933","text":"# -*- coding: utf-8 -*-\n\nfrom deval.component.std.network import NetworkComponent\nfrom deval.component.ios.utils.iosfuncs import IOSProxy, check_platform_ios\nfrom deval.utils.parse import parse_uri\n\n\nclass IOSNetworkComponent(NetworkComponent):\n\n def __init__(self, name, dev, uri):\n self._name = name\n self.device = dev\n try:\n self.proxy = self.device.iosproxy\n except AttributeError:\n self.device.iosproxy = IOSProxy(**check_platform_ios(uri))\n self.proxy = self.device.iosproxy\n\n def get_ip_address(self):\n return self.proxy.driver.status()['ios']['ip']\n \n @property\n def name(self):\n return self._name\n \n @name.setter\n def name(self, value):\n self._name = value\n","sub_path":"deval/component/ios/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"557646444","text":"# brute: 무식한, force: 힘 무식한 힘으로 해석할 수 있다.\n# 완전탐색 알고리즘\n# 가능한 모든 경우의 수를 모두 탐색하면서 요구조건에 충족되는 결과만을 가져온다.\n# 알고리즘 설계의 가장 기본적인 접근 방법은 해가 존재할 것으로 예상되는 모든 영역을 전체 탐색하는 방법\n# 선형 구조를 전체적으로 탐색하는 순차 탐색\n# 비선형 구조를 전체적으로 탐색하는 깊이 우선 탐색(DFS, Depth First Search) - 백트래킹\n# 너비 우선 탐색(BFS, breadth first search)이 가장 기본적인 도구 - 브루트포스\n\n\n# 문제\n# 왕비를 
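# Editor's sketch of the reparameterization trick used by VUNet.reparameterize in
# the vunet.py record above: rather than sampling z ~ N(mu, sigma^2) directly
# (which is not differentiable w.r.t. mu and sigma), sample eps ~ N(0, 1) and form
# z = mu + sigma * eps with sigma = exp(0.5 * logvar). Minimal standalone form:
import torch

def reparameterize(z_mu, z_logvar):
    std = torch.exp(0.5 * z_logvar)   # sigma recovered from the log-variance
    eps = torch.randn_like(std)       # eps ~ N(0, 1), same shape and device
    return z_mu + eps * std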
피해 일곱 난쟁이들과 함께 평화롭게 생활하고 있던 백설공주에게 위기가 찾아왔다. 일과를 마치고 돌아온 난쟁이가 일곱 명이 아닌 아홉 명이었던 것이다.\n#\n# 아홉 명의 난쟁이는 모두 자신이 \"백설 공주와 일곱 난쟁이\"의 주인공이라고 주장했다. 뛰어난 수학적 직관력을 가지고 있던 백설공주는, 다행스럽게도 일곱 난쟁이의 키의 합이 100이 됨을 기억해 냈다.\n#\n# 아홉 난쟁이의 키가 주어졌을 때, 백설공주를 도와 일곱 난쟁이를 찾는 프로그램을 작성하시오.\n#\n# 입력\n# 아홉 개의 줄에 걸쳐 난쟁이들의 키가 주어진다. 주어지는 키는 100을 넘지 않는 자연수이며, 아홉 난쟁이의 키는 모두 다르며, 가능한 정답이 여러 가지인 경우에는 아무거나 출력한다.\n#\n# 출력\n# 일곱 난쟁이의 키를 오름차순으로 출력한다. 일곱 난쟁이를 찾을 수 없는 경우는 없다.\n\nimport sys\nsys.stdin=open(\"input.txt\",\"r\")\n\nheight=[int(input()) for _ in range(9)]\nheight.sort(reverse=True)\n\n# 합이 100인 7가지 숫자 구하기\n# 정렬 후 조합\n\nvisited=[0]*9\n\ndef show(arr):\n for i in range(len(arr)-1,-1,-1):\n print(arr[i])\n\ndef comb(arr, start, sum):\n global end\n if sum > 100:\n return\n if len(arr) == 7:\n if sum==100:\n show(arr)\n end=True\n return\n for i in range(start,9):\n if not visited[i]:\n arr.append(height[i])\n comb(arr,i+1,sum+height[i])\n if end:\n return\n arr.pop()\n\nend=False\ncomb([],0,0)\n\n","sub_path":"백준/Brute Force/일곱난쟁이.py","file_name":"일곱난쟁이.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"144849483","text":"import json\n\ni = 0\nMPArray =[]\nTSPArray = []\nEFGPArray =[]\nThreePARArray=[]\nFTrArray=[]\nORBPercentArray=[]\nDRBPercentArray=[]\nTRBPercentArray=[]\nASTArray=[]\nSTLArray=[]\nBLKArray=[]\nTOVArray=[]\nUSGArray=[]\nORtgArray=[]\nDRtgArray=[]\n\n\n\nwhile(i < 11):\n with open(\"G\" + str(i) + \".json\") as json_file:\n data = json.load(json_file)\n #for p in data['ADVANCE']:\n print(data[0][\"STATS\"][\"ADVANCE\"])\n MPArray.append(data[0][\"STATS\"][\"ADVANCE\"][\"MP\"])\n #.append(data[0][\"STATS\"][\"ADVANCE\"][\"FG\"])\n TSPArray.append(data[0][\"STATS\"][\"ADVANCE\"][\"TS%\"])\n ThreePARArray.append(data[0][\"STATS\"][\"ADVANCE\"][\"3PAr\"])\n EFGPArray.append(data[0][\"STATS\"][\"ADVANCE\"][\"eFG%\"])\n \n FTrArray.append(data[0][\"STATS\"][\"ADVANCE\"][\"FTr\"])\n ORBPercentArray.append(data[0][\"STATS\"][\"ADVANCE\"][\"ORB%\"])\n DRBPercentArray.append(data[0][\"STATS\"][\"ADVANCE\"][\"DRB%\"])\n TRBPercentArray.append(data[0][\"STATS\"][\"ADVANCE\"][\"TRB%\"])\n \n ASTArray.append(data[0][\"STATS\"][\"ADVANCE\"][\"AST%\"])\n STLArray.append(data[0][\"STATS\"][\"ADVANCE\"][\"STL%\"])\n BLKArray.append(data[0][\"STATS\"][\"ADVANCE\"][\"BLK%\"])\n TOVArray.append(data[0][\"STATS\"][\"ADVANCE\"][\"TOV%\"])\n USGArray.append(data[0][\"STATS\"][\"ADVANCE\"][\"USG%\"])\n ORtgArray.append(data[0][\"STATS\"][\"ADVANCE\"][\"ORtg\"])\n DRtgArray.append(data[0][\"STATS\"][\"ADVANCE\"][\"DRtg\"])\n \n print()\n print()\n i = i + 1\n \nprint(MPArray)\n\noutputDict = {}\noutputDict['MP'] = MPArray\noutputDict['TS%'] = TSPArray\noutputDict['eFG%'] = EFGPArray\noutputDict['3PAr'] = ThreePARArray\noutputDict['FTr'] = FTrArray\noutputDict['ORB%'] = ORBPercentArray\noutputDict['DRB%'] = DRBPercentArray\noutputDict['TRB%'] = TRBPercentArray\noutputDict['AST%'] = ASTArray\noutputDict['STL%'] = STLArray\noutputDict['BLK%'] = BLKArray\noutputDict['TOV%'] = TOVArray\noutputDict['USG%'] = USGArray\noutputDict['ORtg'] = ORtgArray\noutputDict['DRtg'] = DRtgArray\n#finish\n\noutput = {\"ADVANCE\" : outputDict}\noutputstring = json.dumps(output, indent=4, separators=(',', ': '))\nwith open('mainTeamADVANCE.txt', 'w') as outfile:\n outfile.write(outputstring)\n # print(outputstring)\n\n #print (data.name)\n \n\n\n \n# with open('MP2.txt', 'w') as outfile: \n# json.dump(MPArray, outfile)\n# json.dump(TSPArray, outfile)\n# 
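# Editor's sketch for the "일곱난쟁이" (Seven Dwarfs) record above: pick the 7 of 9
# heights that sum to 100. The record hand-rolls a pruned recursive search; an
# equivalent brute force over all C(9, 7) = 36 subsets with itertools is shorter:
from itertools import combinations

def seven_dwarfs(heights):
    for combo in combinations(sorted(heights), 7):
        if sum(combo) == 100:
            return list(combo)   # already in ascending order
    return None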
json.dump(EFGPArray, outfile)\n# json.dump(FGPercentArray, outfile)\n \n# with open('3P.txt', 'w') as outfile: \n# json.dump(ThreePointArray, outfile)\n \n# with open('3PA.txt', 'w') as outfile: \n# json.dump(ThreePA, outfile)\n \n# with open('3P%.txt', 'w') as outfile: \n# json.dump(ThreePercent, outfile)\n \n# with open('FT.txt', 'w') as outfile: \n# json.dump(FTArray, outfile)\n \n# with open('FTA.txt', 'w') as outfile: \n# json.dump(FTAArray, outfile) \n\n# with open('FT%.txt', 'w') as outfile: \n# json.dump(FTPercent, outfile) \n \n# with open('ORB.txt', 'w') as outfile: \n# json.dump(ORBArray, outfile)\n \n# with open('DRB.txt', 'w') as outfile: \n# json.dump(DRBArray, outfile)\n \n# with open('TRB.txt', 'w') as outfile: \n# json.dump(TRBArray, outfile)\n \n# with open('AST.txt', 'w') as outfile: \n# json.dump(ASTArray, outfile)\n \n# with open('STL.txt', 'w') as outfile: \n# json.dump(STLArray, outfile)\n \n# with open('BLK.txt', 'w') as outfile: \n# json.dump(BLKArray, outfile)\n \n# with open('TOV.txt', 'w') as outfile: \n# json.dump(TOVArray, outfile)\n \n# with open('PF.txt', 'w') as outfile: \n# json.dump(PFArray, outfile)\n \n# with open('PTS.txt', 'w') as outfile: \n# json.dump(PTSArray, outfile)","sub_path":"nba/LAL1987/mainTeamAdvancedScript.py","file_name":"mainTeamAdvancedScript.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"196211744","text":"# -*- coding: utf-8 -*-\nfrom ki.utils.wait_calc import WaitCalc\nfrom ki.utils.win_calc import WinCalc\nfrom ki.databaseprocessing.gamelog import GameLog\nfrom ki.utils.tile import Tile\nfrom datetime import datetime\n\n\ndef test_identical_result(hand34):\n finished_hand, win_tile = WaitCalc.waiting_calc_brute(hand34)\n if finished_hand:\n print(\"Result of brute:\")\n WaitCalc.print_waiting_tiles(finished_hand, win_tile)\n finished_hand, win_tile = WaitCalc.waiting_calc(hand34)\n if finished_hand:\n print(\"Result of pruned:\")\n WaitCalc.print_waiting_tiles(finished_hand, win_tile)\n\n\ndef compare_pruned_and_brute_complexity(hand34):\n s = datetime.now()\n for i in range(0, 20):\n WaitCalc.waiting_calc_brute(hand34)\n e = datetime.now()\n print(\" Brute: {}\".format((e-s).microseconds))\n s = datetime.now()\n for i in range(0, 20):\n WaitCalc.waiting_calc(hand34)\n e = datetime.now()\n print(\" Pruned: {}\".format((e - s).microseconds))\n\n\ndef bacth_compare():\n test_cases = list()\n test_cases.append([0, 0, 1, 1, 2, 2, 3, 3, 24, 4, 5, 5, 6]) # not waiting\n test_cases.append([10, 10, 11, 11, 12, 12, 13, 13, 24, 27, 27, 29, 30]) # not waiting\n test_cases.append([1, 3, 4, 12, 12, 12, 23, 24, 25, 31, 31, 33, 33]) # not waiting\n test_cases.append([0, 0, 1, 1, 2, 2, 3, 3, 7, 7, 5, 5, 16]) # seven pairs\n test_cases.append([0, 8, 9, 17, 18, 26, 27, 28, 29, 30, 31, 32, 33]) # guoshi all\n test_cases.append([0, 8, 9, 17, 17, 26, 27, 28, 29, 30, 31, 32, 33]) # guoshi one\n test_cases.append([1, 2, 3, 6, 7, 8, 12, 13, 15, 16, 23, 23, 23]) # not waiting two opens\n test_cases.append([1, 2, 3, 6, 7, 8, 12, 13, 14, 16, 19, 23, 23]) # not waiting two singles\n for case in test_cases:\n print(case)\n compare_pruned_and_brute_complexity(case)\n\n\ndef main():\n # test_win_parse()\n # GameLog.print_single_round(612, 6)\n # GameLog.print_game_by_id(612)\n bacth_compare()\n\n\n\nif __name__ == '__main__':\n 
main()","sub_path":"ki/utils/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"199630107","text":"from django.db import models\nfrom datetime import datetime\n# from django.urls import reverse\n# import uuid # Required for unique book instances\n\nclass ToDo(models.Model):\n text = models.CharField(max_length=100, verbose_name='Задача')\n created_at = models.DateTimeField(auto_now_add=True, verbose_name='Дата') # 'models.DateField' bylo stalo s 'Time' iz-za etogo konflikta v baze oshybka sprosit mentorov kak pravilno vnosit izmeneniya v kode v baze dannyh \n is_closed = models.BooleanField(default=False, verbose_name='Выполнена')\n is_favorite = models.BooleanField(default=False, verbose_name='Избранная')\n \n class Meta:\n verbose_name = 'Задача'\n verbose_name_plural = 'Задачи'\n\n\nclass BookShop(models.Model):\n title = models.CharField(max_length=60, verbose_name='Заголовок')\n subtitle = models.CharField(max_length=50, verbose_name='Подзаголовок')\n description = models.CharField(max_length=650, verbose_name='Описание')\n price = models.IntegerField(verbose_name='Цена')\n genre = models.CharField(max_length=60, verbose_name='Жанр')\n author = models.CharField(max_length=35, verbose_name='Автор')\n is_favorite = models.BooleanField(default=False, verbose_name='Избранная')\n year = models.DateTimeField(verbose_name='Год выпуска книги')\n date = models.DateField(auto_now_add=True, verbose_name='Добавление книги на сайт')\n \n\n class Meta:\n verbose_name = 'Книжный магазин'\n verbose_name_plural = 'Книжные магазины'\n\n\n# class Genre(models.Model):\n# \"\"\"\n# Model representing a book genre (e.g. Science Fiction, Non Fiction).\n# \"\"\"\n# name = models.CharField(max_length=200, help_text=\"Enter a book genre (e.g. Science Fiction, French Poetry etc.)\")\n\n# class Book(models.Model):\n# \"\"\"\n# Model representing a book (but not a specific copy of a book).\n# \"\"\"\n# title = models.CharField(max_length=200)\n# author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)\n# # Foreign Key used because book can only have one author, but authors can have multiple books\n# # Author as a string rather than object because it hasn't been declared yet in the file.\n# # summary = models.TextField(max_length=1000, help_text=\"Enter a brief description of the book\")\n# # isbn = models.CharField('ISBN',max_length=13, help_text='13 Character ISBN number')\n# genre = models.ManyToManyField(Genre, help_text=\"Select a genre for this book\")\n\n# class BookInstance(models.Model):\n# \"\"\"\n# Model representing a specific copy of a book (i.e. 
that can be borrowed from the library).\n# \"\"\"\n# id = models.UUIDField(primary_key=True, default=uuid.uuid4, help_text=\"Unique ID for this particular book across whole library\")\n# book = models.ForeignKey('Book', on_delete=models.SET_NULL, null=True)\n# imprint = models.CharField(max_length=200)\n# due_back = models.DateField(null=True, blank=True)\n\n# LOAN_STATUS = (\n# ('m', 'Maintenance'),\n# ('o', 'On loan'),\n# ('a', 'Available'),\n# ('r', 'Reserved'),\n# )\n\n# status = models.CharField(max_length=1, choices=LOAN_STATUS, blank=True, default='m', help_text='Book availability')\n\n# class Meta:\n# ordering = [\"due_back\"]\n\n# class Author(models.Model):\n# \"\"\"\n# Model representing an author.\n# \"\"\"\n# first_name = models.CharField(max_length=100)\n# last_name = models.CharField(max_length=100)\n# date_of_birth = models.DateField(null=True, blank=True)\n# date_of_death = models.DateField('Died', null=True, blank=True)\n\n# class Entry(models.Model):\n# blog = models.ForeignKey(Book, on_delete=models.CASCADE)\n# headline = models.CharField(max_length=255)\n# body_text = models.TextField()\n# pub_date = models.DateField()\n# mod_date = models.DateField()\n# authors = models.ManyToManyField(Author)\n# number_of_comments = models.IntegerField()\n# number_of_pingbacks = models.IntegerField()\n# rating = models.IntegerField()\n ","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"275602068","text":"import os\nimport numpy as np\nimport cv2\nfrom dotenv import find_dotenv, load_dotenv\nload_dotenv(find_dotenv())\n\ntry:\n conmaxright=int(os.getenv(\"envmaxright\"))\n conmaxleft=int(os.getenv(\"envmaxleft\"))\n conresistleft=int(os.getenv(\"envresistleft\"))\n conresistright=int(os.getenv(\"envresistright\"))\n conresistcentre=int(os.getenv(\"envresistcentre\"))\nexcept TypeError:\n conmaxright=9999\n conmaxleft=0\n conresistleft=9999\n conresistright=99999\n conresistcentre=0\n print(\"An exception occurred\")\n #may now set value to something to force calibration\n\n# http://www.pyimagesearch.com/2014/08/04/opencv-python-color-detection/\n# define the list of boundaries\n# boundaries = [([0, 0, 0], [40, 40, 40])]\n# green\n# boundaries = [([10, 100, 10], [100, 255, 100])]\n# orange\n# boundaries = [([0, 50, 100], [100, 200, 255])]\n\n# iphone video\n# contourmin = 3000\ncontourmin = 800\n\n\ndef kitemask(c, frame, kitecolours='kite1'):\n # This sets the properties for the kite we are looking for\n # setup for now is just for kite1 but we can be looking for in\n # different conditions and this might affect the colours\n # so think we amend this to add the object for indoorkite\n # and \n if cv2.contourArea(c) < contourmin:\n return 0\n if kitecolours == 'indoorkite':\n boundaries = [([10, 10, 140], [70, 70, 200])]\n elif kitecolours == 'kite1':\n boundaries = [([0, 0, 100], [100, 100, 255]),\n ([0, 50, 100], [120, 220, 255])\n ]\n else: # 'kite2'\n boundaries = [([0, 0, 0], [30, 30, 30]),\n ([10, 10, 100], [100, 100, 255]),\n ([0, 50, 100], [120, 220, 255])\n ]\n # iphone\n boundaries = [([0, 0, 100], [100, 100, 255]),\n ([0, 50, 150], [120, 220, 255])\n ]\n\n totmask = 1\n for (lower, upper) in boundaries:\n # create NumPy arrays from the boundaries\n low = np.array(lower, dtype=\"uint8\")\n upp = np.array(upper, dtype=\"uint8\")\n\n (x, y, w, h) = cv2.boundingRect(c)\n roi = frame[y:y + h, x:x + w]\n # loop over the boundaries\n 
mask = cv2.inRange(roi, low, upp)\n totmask *= np.sum(mask)\n print(x, y, w, h, \"cont\", cv2.contourArea(c))\n print(\"mask: \", np.sum(mask), totmask)\n return totmask\n\n\ndef checklimits(angle, maxleft, maxright):\n \"\"\"\n :param angle:\n :param maxleft:\n :param maxright:\n :return:\n\n >>> checklimits(50, -45, 30)\n 30\n >>> checklimits(-50, -45, 30)\n -45\n >>> checklimits(-20, -45, 30)\n -20\n \"\"\"\n\n angle = max(angle, maxleft)\n angle = min(angle, maxright)\n return angle\n\n\ndef getangle(resistance, maxleft=conmaxleft, maxright=conmaxright,\n resistleft=conresistleft, resistright=conresistright, resistcentre=conresistcentre):\n \"\"\"\n :param resistcentre:\n :param resistright:\n :param resistleft:\n :param maxright:\n :param maxleft:\n :param resistance:\n :return angle:\n\n >>> getangle(267)\n 20\n >>> getangle(200)\n 0\n >>> getangle(110)\n -30\n >>> getangle(155)\n -15\n \"\"\"\n\n # calibration is based on 0 being the centre and maxleft and maxright being\n # defined in degrees - the corrsesponding values of the resistor should be taken\n # for all of these and we will for now assume resistor is linear - have now changed\n # so that values beyond maxleft and maxright should be supported\n\n if resistance > resistcentre:\n angle = ((resistance - resistcentre) * maxright) / (resistright - resistcentre)\n elif resistance < resistcentre:\n angle = ((resistance - resistcentre) * maxleft) / (resistleft - resistcentre)\n else:\n angle = 0\n return int(angle)\n\n\ndef getresist(angle, maxleft=conmaxleft, maxright=conmaxright, resistleft=conresistleft,\n resistright=conresistright, resistcentre=conresistcentre):\n \"\"\"\n :param resistcentre:\n :param resistright:\n :param resistleft:\n :param maxright:\n :param maxleft:\n :param angle:\n :return angle:\n\n >>> getresist(-30)\n 110\n >>> getresist(-15)\n 155\n >>> getresist(0)\n 200\n >>> getresist(20)\n 267\n >>> getresist(10)\n 233\n \"\"\"\n\n # calibration is based on 0 being the centre and maxleft and maxright being\n # defined in degrees - the corrsesponding values of the resistor should be taken\n # for all of these and we will for now assume resistor is linear\n\n if angle < 0:\n resistance = resistleft + ((angle - maxleft) * (resistcentre - resistleft) / (0 - maxleft))\n elif angle > 0:\n resistance = resistright + ((maxright - angle) * (resistcentre - resistright) / maxright)\n else:\n resistance = resistcentre\n return int(resistance)\n\n\ndef get_action(output, barangle):\n # Now added ability to send motor message 6 for leftonly and 7 for rightonly\n # and will now add speed into the message as %age of max value up to 99 but 0 is max speed\n \"\"\"\n :param output:\n :param barangle:\n :return action:\n\n >>> get_action(-10, -5)\n 300\n >>> get_action(0, 0)\n 0\n \"\"\"\n\n MAXLEFT = conmaxleft # These are to try and avoid breaking the bar\n MAXRIGHT = conmaxright # similarly to protect bar as attached close to pivot\n TOLERANCE = 1 # degreee of tolerance\n action = 0\n if abs(output) < TOLERANCE:\n action = 0 # stop\n elif output < 0 and barangle > MAXLEFT:\n action = 300 # Left\n elif output > 0 and barangle < MAXRIGHT:\n action = 400 # Right\n # TODO think about how PID impacts this if at all - speed should prob be used\n # action = int(msg + speed) if 0 < speed < 100 else int(msg)\n return action\n\n\ndef _test():\n import doctest\n doctest.testmod(verbose=False)\n\n\nif __name__ == '__main__':\n 'Can run with -v option if you want to confirm tests were run'\n 
_test()\n","sub_path":"scripts/kite_funcs.py","file_name":"kite_funcs.py","file_ext":"py","file_size_in_byte":5971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"100348074","text":"import urllib\nimport requests\nimport sys\nimport pprint\nfrom biothings_client import get_client\n\n\nclass SmartAPI:\n API_BASE_URL = 'http://smart-api.info/api/query'\n TIMEOUT_SEC = 120\n\n @staticmethod\n def query(search_term, field=None):\n if field is None:\n url = SmartAPI.API_BASE_URL + '/' + '?' + 'q=' + search_term + '&size=30'\n else:\n search = field + \":\" + search_term\n url = SmartAPI.API_BASE_URL + '/' + '?' + 'q=' + search + '&size=30'\n\n try:\n res = requests.get(url,\n timeout=SmartAPI.TIMEOUT_SEC)\n except requests.exceptions.Timeout:\n print(url, file=sys.stderr)\n print('Timeout in QueryChEMBL for URL: ' + url, file=sys.stderr)\n return None\n status_code = res.status_code\n if status_code != 200:\n print(url, file=sys.stderr)\n print('Status code ' + str(status_code) + ' for url: ' + url, file=sys.stderr)\n return None\n return res.json()\n\n @staticmethod\n def search_titles(search_term):\n result = SmartAPI.query(search_term, field='info.title')\n titles = []\n for r in result:\n if r == 'hits':\n #for h in result[r]:\n # print(h)\n #print(result[r]['info']['title'])\n for h in result[r]:\n titles.append(h['info']['title'])\n return titles\n\n @staticmethod\n def search_tags(search_term):\n \"\"\"\n Params: Search Term (string)\n Return: Titles of matched Smart API Beacons (list)\n Description: Searches SMART APIs for those with tags matching search term.\n \"\"\"\n search_term = search_term + '&translator'\n result = SmartAPI.query(search_term, field='tags.name')\n titles = []\n for r in result:\n if r == 'hits':\n #for h in result[r]:\n # print(h)\n #print(result[r]['info']['title'])\n for h in result[r]:\n title = h['info']['title']\n try:\n description = h['info']['description']\n except KeyError:\n description = 'Missing'\n _id = h['_id']\n tags = [i['name'] for i in h['tags'] if i['name'] != 'translator']\n titles.append( {'title': title, 'description': description, 'id': _id, 'tags': tags} )\n return titles\n\n @staticmethod\n def search_all(search_term):\n \"\"\"\n Params: Search Term (string)\n Return: Titles of matched Smart API Beacons (list)\n Description: Searches all SMART API documentation fields for search term.\n \"\"\"\n result = SmartAPI.query(search_term)\n titles = []\n for r in result:\n if r == 'hits':\n #for h in result[r]:\n # print(h)\n # print(result[r]['info']['title'])\n for h in result[r]:\n titles.append(h['info']['title'])\n\n return titles\n\n\n @staticmethod\n def search_all_tags(search_term='translator'):\n \"\"\"\n Params: None\n Return: Unique tags in Smart API registry associated with Translator APIs (list)\n Description: Get a list of unique translator API tags.\n \"\"\"\n result = SmartAPI.query(search_term, field='tags.name')\n tags = []\n for r in result:\n if r == 'hits':\n #for h in result[r]:\n # print(h)\n #print(result[r]['info']['title'])\n for h in result[r]:\n for tag in h['tags']:\n tags.append(tag['name'])\n return list(set(tags))\n\n @staticmethod\n def query_api(api, entity):\n if api == \"MyChem.info&API\":\n url = 'http://mychem.info/v1/query?q=' + entity\n try:\n res = requests.get(url, timeout=SmartAPI.TIMEOUT_SEC)\n except requests.exceptions.Timeout:\n print(url, file=sys.stderr)\n print('Timeout in for URL: ' + url, file=sys.stderr)\n return None\n status_code = 
res.status_code\n if status_code != 200:\n print(url, file=sys.stderr)\n print('Status code ' + str(status_code) + ' for url: ' + url, file=sys.stderr)\n return None\n result = res.json()\n else:\n return\n output = []\n for r in result:\n if r == 'hits':\n h = result[r]\n output = [i for i in h[0].keys() if not i.startswith('_')]\n return output\n\n #@staticmethod\n #def\n\nif __name__ == '__main__':\n s = SmartAPI\n print(s.search_titles('drug'))\n print(s.search_all('drug'))\n print(s.search_all('*'))\n print(s.search_tags('gene'))\n\n\n\n\n\n\n\n\n\n# query parameters paths.parameters:drug - doesn't work\n# query title info.title:gene","sub_path":"backend/app/user_functions/API/SmartAPI.py","file_name":"SmartAPI.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"216533405","text":"import random\nimport copy\ndef getArray (size):\n ans =[]\n for i in range(size):\n ans.append(random.randint(1,101))\n return ans\ndef bubble(array):\n ans=array\n print(array)\n #create a loop to go trough every pair\n cont = True\n while cont:\n cont=False\n for i in range(len(array)-1):\n if array[i]>array[i+1]:\n #check to see if two items are in order\n #if not swap them\n temp=array[i]\n array[i]=array[i+1]\n array[i+1]=temp\n cont=True\n #continue until no changes are made\n print (ans)\n return ans","sub_path":"Sort.py","file_name":"Sort.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"249483685","text":"import logging\nimport os\n\nfrom skippy.data.redis import get_files_size, get_storage_nodes, get_dl_bandwidth, get_storage_bucket, get_pod_node_name\nfrom skippy.data.utils import get_bucket_urn\n\n\ndef get_best_node(urn: str):\n node_name = get_pod_node_name(os.environ['HOSTNAME'])\n\n file_size = get_files_size(urn)\n\n time = 0\n max_bw_storage = None\n\n storage_nodes = get_storage_nodes(urn)\n if storage_nodes is None:\n storage_nodes = get_storage_bucket(get_bucket_urn(urn))\n # find the storage node that holds the data and has the minimal bandwidth in the required direction (down)\n max_bw = 0\n logging.debug('Calculations node [%s] to storage options %s to transfer %s' % (node_name, storage_nodes, urn))\n for storage in storage_nodes:\n logging.debug('Node [%s] to storage [%s]' % (node_name, storage))\n if storage == node_name:\n logging.debug('Node and storage the same. Time = 0')\n return storage\n\n bandwidth = get_dl_bandwidth(storage, node_name)\n if bandwidth > max_bw:\n max_bw = bandwidth\n max_bw_storage = storage\n\n if max_bw_storage and file_size:\n time += int(file_size / max_bw)\n logging.debug('[%s] is the best storage from node [%s] to transfer %s. 
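# Editor's usage sketch for the Sort.py record above, assuming its getArray and
# bubble definitions are in scope; the record defines them but never invokes
# bubble under a main guard:
if __name__ == '__main__':
    data = getArray(10)   # 10 random ints drawn from [1, 101]
    bubble(data)          # sorts in place, printing the list before and after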
Time = %s' % (\n max_bw_storage, node_name, urn, time))\n return max_bw_storage\n","sub_path":"skippy/data/priorities.py","file_name":"priorities.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"164360772","text":"from django.conf import settings\nfrom django.http import HttpResponse, HttpResponseForbidden, HttpResponseBadRequest\nfrom django.views.decorators.csrf import csrf_exempt\nfrom twilio.request_validator import RequestValidator\nfrom twilio.twiml.messaging_response import MessagingResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils import timezone\nimport datetime\nimport twilio.rest\nimport logging\nfrom . import tasks\nfrom . import models\nimport operator_interface.tasks\nfrom operator_interface.models import Message\n\n\nlogger = logging.getLogger(__name__)\ntwilio_client = twilio.rest.Client(settings.TWILIO_ACCOUNT, settings.TWILIO_TOKEN)\ntwilio_validator = RequestValidator(settings.TWILIO_TOKEN)\n\n\ndef check_auth(f):\n def new_f(request, *args, **kwargs):\n uri = f\"https://{request.get_host()}{request.path}\"\n sig_valid = twilio_validator.validate(\n uri, request.POST, request.META.get(\"HTTP_X_TWILIO_SIGNATURE\")\n )\n if not sig_valid:\n return HttpResponseForbidden()\n\n return f(request, *args, **kwargs)\n\n return new_f\n\n\n@csrf_exempt\n@check_auth\ndef webhook(request):\n logger.debug(f\"Got event from twilio whatsapp webhook: {request.POST}\")\n\n msg_from = request.POST.get(\"From\")\n if not msg_from.startswith(\"whatsapp:\"):\n return HttpResponseBadRequest()\n msg_from = msg_from[len(\"whatsapp:\"):]\n msg_id = request.POST.get(\"MessageSid\")\n tasks.handle_whatsapp.delay(msg_id, msg_from, request.POST)\n\n response = MessagingResponse()\n return HttpResponse(str(response))\n\n\n@csrf_exempt\ndef notif_webhook(request):\n logger.debug(f\"Got event from twilio whatsapp status webhook: {request.POST}\")\n\n msg_id = request.POST.get(\"MessageSid\")\n msg_status = request.POST.get(\"MessageStatus\")\n msc_error_code = request.POST.get(\"ErrorCode\")\n msg: Message = Message.objects.filter(platform_message_id=msg_id).first()\n\n if msg_status == \"delivered\" and msg and msg.state != Message.READ:\n msg.state = Message.DELIVERED\n msg.save()\n elif msg_status == \"failed\" and msg:\n if msc_error_code == \"63003\":\n tasks.attempt_alternative_delivery.delay(msg.id)\n else:\n msg.state = Message.FAILED\n msg.save()\n elif msg_status == \"read\" and msg:\n msg.state = Message.READ\n msg.save()\n\n return HttpResponse(\"\")\n\n\n@login_required\ndef account_linking(request):\n state = request.GET.get(\"state\")\n\n try:\n state = models.AccountLinkingState.objects.get(id=state)\n except models.AccountLinkingState.DoesNotExist:\n return HttpResponseBadRequest()\n\n if state.timestamp + datetime.timedelta(minutes=5) < timezone.now():\n return HttpResponseBadRequest()\n state.conversation.conversation.update_user_id(request.user.username)\n state.delete()\n\n message = Message(\n platform=state.conversation,\n text=\"Login complete, thanks!\",\n direction=Message.TO_CUSTOMER,\n )\n message.save()\n operator_interface.tasks.process_message.delay(message.id)\n\n return HttpResponse(\n '
You can now close this window
'\n )\n","sub_path":"django/whatsapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"247427383","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n# In[ ]:\n\n\ndf=pd.read_csv(\"SuperCenterDataNew.csv\")\ndf.head()\n\n\n# In[ ]:\n\n\ntransactions = []\nfor i in range(0, 7501):\n transactions.append([str(df.values[i,j]) for j in range(0, 20)])\n\n\n# In[ ]:\n\n\nfrom apyori import apriori\nrules = apriori(transactions, min_support = 0.003, min_confidence = 0.2, min_lift = 3, min_length = 2)\n\n\n# In[ ]:\n\n\n\n\n\n# In[10]:\n\n\ndef getConfidence(items_given, support_items, countDict):\n items_given.sort()\n support_items = support_items + items_given\n support_items.sort()\n items_given_str = \"_\".join(items_given)\n item_support_str = \"_\".join(support_items)\n item_support = item_support_str\n items = list(countDict.keys())\n if (items_given_str not in items) or (item_support not in items):\n return 0\n else:\n return (countDict[item_support]/ countDict[items_given_str])\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Untitled5.py","file_name":"Untitled5.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"72912551","text":"import glob\nimport os\nimport collections\n\nimport dbus\n\nimport salt.utils\n\n\ndef echo(*args, **kws):\n return { 'args': args, 'kws': kws }\n\ndef status (*args, **kws):\n global opt\n opt = { 'upstart_service_name': 'machineer-mount'\n , 'upstart_service_separator': ' on '\n , 'conf': '/etc/machineer'\n }\n\n opt = _tree_merge ([opt, kws])\n\n ret = {}\n\n ret['src exists'] = True if sum ([\n __salt__['file.is_blkdev'] ( kws['src'])\n , __salt__['file.directory_exists'] ( kws['src'])\n ]) else False\n ret['tgt exists'] = True if __salt__['file.directory_exists'] (kws['tgt']) else False\n ret['exists'] = True if ret['src exists'] and ret['tgt exists'] else False\n\n ret['running'] = True if len ([ obj for obj in _initctl_machineer_mounts()\n if { 'name': _initctl_split_name(obj)\n , 'state': obj['state']\n } == { 'name': { k: opt[k] for k in ['src', 'tgt'] }, 'state': 'running' }\n ]) else False\n\n ret ['enabled'] = True if len ( reduce ( lambda x, y: x + y, [\n [ True for line\n in open (path, 'r') .read() .splitlines()\n if line.split() == {k: opt[k] for k in ['src', 'tgt']} .values()\n ]\n for path\n in ( glob.glob (os.path.join (opt ['conf'], 'fstab.d', '*'))\n + [os.path.join (opt ['conf'], 'fstab')]\n )\n if os.path.isfile (path) ], [] )) else False\n\n return ret\n\n# def _mounts ():\n\n# Basically I don't need this one anymore.\n# I decided against it for the following reasons:\n# 1. There's no way to determine the src when bind mounting.\n# 2. 
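# Editor's sketch for the getConfidence helper in the apriori record above: for a
# rule A -> B, confidence = support(A and B together) / support(A), which the
# helper reads off a dict of itemset counts keyed by sorted, "_"-joined names.
# Hypothetical counts, assuming getConfidence from that record is in scope:
count_dict = {'milk': 40, 'bread_milk': 12}
conf = getConfidence(['milk'], ['bread'], count_dict)   # 12 / 40 = 0.3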
You have to readlink everything because you never know if you've a symlink in /dev.\n\n# return [ dict ( zip ( line.split(), ['src', 'tgt', 'fs', 'opt', 'dummy_1', 'dummy_2'] ))\n# for line in __salt__['cmd.run_all']('cat /proc/mounts')['stdout'].splitlines() ]\n\n\ndef _initctl_machineer_mounts ():\n return [ dbus.SystemBus().get_object ('com.ubuntu.Upstart', bus_object_path)\n .GetAll ('com.ubuntu.Upstart0_6.Instance'\n , dbus_interface = dbus.PROPERTIES_IFACE)\n\n for bus_object_path in\n dbus.SystemBus().get_object ( 'com.ubuntu.Upstart'\n , dbus.SystemBus().get_object ('com.ubuntu.Upstart', '/com/ubuntu/Upstart')\n . GetJobByName ( opt['upstart_service_name']\n , dbus_interface='com.ubuntu.Upstart0_6' )\n )\n . GetAllInstances (dbus_interface='com.ubuntu.Upstart0_6.Job')\n ]\n\ndef _initctl_split_name (obj):\n return dict ( zip ( ['src', 'tgt'], obj ['name'] .split(opt['upstart_service_separator']) ))\n\ndef _tree_merge (trees):\n\n def update(d, u):\n for k, v in u.iteritems():\n if isinstance(v, collections.Mapping):\n r = update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d\n\n return reduce (update, trees)\n","sub_path":"_modules/machineer-mount-helpers.py","file_name":"machineer-mount-helpers.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"564994186","text":"'''\nAuthor: Satoshi Tsutsui \n'''\n\nimport argparse\nimport sys\nimport os\nimport time\nimport tensorflow as tf\n\nfrom os.path import join\n# project_dir: Recommedation/\nproject_dir = os.path.abspath('./')\n'''\nwhile project_dir[-3:] != 'src':\n project_dir = os.path.abspath(join(project_dir, os.pardir))\nproject_dir = join(project_dir, '..')\n'''\ncorpus_dir = join(project_dir, 'corpus')\nsys.path.append(project_dir)\nfrom src.metapath2vec.skipgram import build_model,traning_op,train\nfrom src.metapath2vec.dataset import Dataset\n\ndef parse_args(embedDim, nEpoch, windowSize = 3):\n #Parses the arguments.\n parser = argparse.ArgumentParser(description=\"metapath2vec\")\n parser.add_argument('--walks',type=str,default = join(corpus_dir, 'random_walk.txt'), help='text file that has a random walk in each line. A random walk is just a seaquence of node ids separated by a space.')\n parser.add_argument('--types',type=str,default = join(corpus_dir, 'typeMap.txt'), help='text file that has node types. each line is \"node id node type\"')\n parser.add_argument('--epochs',type=int,default = nEpoch, help='number of epochs')\n # parser.add_argument('--batch',type=int,default=1, help='Batch size.Only batch one is supported now...')\n parser.add_argument('--lr',type=float,default=0.01, help='learning rate')\n parser.add_argument('--log', default = join(corpus_dir, './log'),type=str,help='log directory')\n parser.add_argument('--log-interval',default=-1,type=int,help='log intervals. -1 means per epoch')\n parser.add_argument('--max-keep-model',default=10,type=int,help='number of models to keep saving')\n parser.add_argument('--embedding-dim',default = embedDim,type=int,help='embedding dimensions')\n parser.add_argument('--negative-samples',default = 5,type=int,help='number of negative samples')\n parser.add_argument('--care-type',default = 0,type=int,help='care type or not. if 1, it cares (i.e. heterogeneous negative sampling). If 0, it does not care (i.e. normal negative sampling). 
')\n parser.add_argument('--window',default = windowSize,type=int,help='context window size')\n\n return parser.parse_args()\n\ndef main(args):\n if os.path.isdir(args.log):\n print(\"%s already exist. are you sure to override? Ok, I'll wait for 5 seconds. Ctrl-C to abort.\"%args.log)\n time.sleep(5)\n os.system('rm -rf %s/'%args.log)\n else:\n os.makedirs(args.log)\n print(\"made the log directory\",args.log)\n \n tf.reset_default_graph()\n dataset=Dataset(random_walk_txt=args.walks,node_type_mapping_txt=args.types,window_size=args.window)\n print(dataset)\n center_node_placeholder,context_node_placeholder,negative_samples_placeholder,loss = build_model(BATCH_SIZE=1,VOCAB_SIZE=len(dataset.nodeid2index),EMBED_SIZE=args.embedding_dim,NUM_SAMPLED=args.negative_samples)\n optimizer = traning_op(loss,LEARNING_RATE=args.lr)\n train(center_node_placeholder,context_node_placeholder,negative_samples_placeholder,loss,dataset,optimizer,NUM_EPOCHS=args.epochs,BATCH_SIZE=1,NUM_SAMPLED=args.negative_samples,care_type=args.care_type,LOG_DIRECTORY=args.log,LOG_INTERVAL=args.log_interval,MAX_KEEP_MODEL=args.max_keep_model)\n\nif __name__ == \"__main__\":\n embedDim = 100\n args = parse_argse(embedDim)\n main(args)\n","sub_path":"src/metapath2vec/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"68292045","text":"from django.shortcuts import render\nfrom django.contrib import messages\nimport math\nimport requests\nfrom .models import City\nfrom .forms import CityForm\nfrom .models import Forecast_1_OWM\nfrom .models import Forecast_1_Weatherbit\nfrom .models import Forecast_1_here\nfrom .models import Forecast_1_WWO\nfrom .unit_converter import parse_dms\nfrom datetime import datetime\nfrom datetime import date\nimport time\nimport re\n\n\ndef current_weather_processing(request):\n \"\"\"Imports all available forecasts for the weather providers and the existing city objects. 
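# Editor's note on the metapath2vec train.py record above: the entry point calls
# `parse_argse(embedDim)`, but the function defined earlier is `parse_args`, and
# it also requires `nEpoch`. A corrected guard, with an epoch count of 10 chosen
# purely for illustration:
if __name__ == "__main__":
    embedDim = 100
    nEpoch = 10                       # hypothetical; substitute a real value
    args = parse_args(embedDim, nEpoch)
    main(args)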
In a next step, the forecasts are turned into forecast objects and saved in the database.\"\"\"\n cities = City.objects.all() # return all the cities in the database\n\n # specification of relevant URLs (geodata + weather_providers)\n\n url_geodata = 'https://api.opencagedata.com/geocode/v1/json?q={}&key=1e73e20428e54172a2795c05a59cafab'\n\n url_weather_1 = 'https://api.openweathermap.org/data/2.5/onecall?lat={}&lon={}&units=metric&exclude=hourly&appid=d057965cf56ff66207b004eab30415b8'\n url_weather_2 = 'https://api.weatherbit.io/v2.0/forecast/daily?lat={}&lon={}&key=eb37c9d0e8204376a376ae29539d8fec&units=M&days=7'\n url_weather_3 = 'https://weather.ls.hereapi.com/weather/1.0/report.json?apiKey=VCeAX-isAP-r2K2JzUfkgMe63dSEAbS-KIO1WUjL0FI&product=forecast_7days_simple&latitude={}&longitude={}'\n url_weather_4 = 'http://api.worldweatheronline.com/premium/v1/weather.ashx?key=220c64fed4a44bed8d293252201705&q={},{}&num_of_days=5&tp=24&format=json&extra=localObsTime'\n\n if request.method == 'POST': # only true if form is submitted\n form = CityForm(request.POST) # add actual request data to form for processing\n form.save() # will validate and save if validate\n form = CityForm()\n\n weather_data = [] # container for the city names\n\n for city in cities:\n weather = {\n 'city': city, # save the name for each active city\n }\n weather_data.append(weather) # add the name for the current city into our list\n\n city_geodata = requests.get(\n url_geodata.format(city)).json() # request the API data and convert the JSON to Python data types\n if city_geodata[\"total_results\"] == 0:\n messages.error(request, \"Error\")\n break\n\n city_countrycode = city_geodata[\"results\"][0][\"components\"][\n \"ISO_3166-1_alpha-3\"] # extraction of countrycode for the respective city\n lat_param = parse_dms(city_geodata[\"results\"][0][\"annotations\"][\"DMS\"][\"lat\"]) # longitude and latitude data\n lng_param = parse_dms(city_geodata[\"results\"][0][\"annotations\"][\"DMS\"][\"lng\"])\n\n city_weather_1 = requests.get(url_weather_1.format(lat_param,\n lng_param)).json() # request the API data and convert the JSON to Python data types\n city_weather_2 = requests.get(url_weather_2.format(lat_param, lng_param)).json()\n city_weather_3 = requests.get(url_weather_3.format(lat_param, lng_param)).json()\n city_weather_4 = requests.get(url_weather_4.format(lat_param, lng_param)).json()\n\n day_unix = int(\n (time.time() - time.time() % 86400) / 86400) -1 # number of the current day (makes day uniquely identifiable)\n day_human = '03/06/2020'#date.today().strftime(\"%d/%m/%Y\") # day in human-readable format (will be displayed in the table)\n\n # create forecast objects for every city and one to four days in the future\n for forecast_period in range(1, 5):\n Forecast_1_OWM.objects.create(\n forecast_provider='OpenWeatherMap', # denotes the forecast provider\n day_human=day_human, # human-readable timestamp (see above)\n day_unix=day_unix, # day as number (see above)\n city=city,\n countrycode=city_countrycode,\n forecast_pressure_1=city_weather_1['daily'][forecast_period]['pressure'],\n # OpenWeatherMap forecast data - pressure\n forecast_humidity_1=city_weather_1['daily'][forecast_period]['humidity'],\n # OpenWeatherMap forecast data - humidity\n forecast_max_temp_1=city_weather_1['daily'][forecast_period]['temp']['max'],\n # OpenWeatherMap forecast data - maximum temperature\n forecast_min_temp_1=city_weather_1['daily'][forecast_period]['temp']['min'],\n # OpenWeatherMap forecast data - minimum temperature\n 
forecast_temperature_1=city_weather_1['daily'][forecast_period]['temp']['day'],\n # OpenWeatherMap forecast data - average temperature\n forecast_description=city_weather_1['daily'][forecast_period]['weather'][0]['description'],\n # OpenWeatherMap forecast data - forecast description\n forecasted_day=day_unix + forecast_period,\n # date for which the forecast should be accurate (as a numeric timestamp)\n forecast_period=forecast_period,\n # how many days are between the day when the forecast is made and the day to which it refers\n forecast_icon=city_weather_1['daily'][forecast_period]['weather'][0]['icon'], # icon\n name=str(city) + '_' + str(day_unix + forecast_period) + '_fp' + str(\n forecast_period) + '_OpenWeatherMap'\n # code to make the forecast recognizable (contains city, prediction day, predicted day, forecast period, provider)\n )\n\n Forecast_1_Weatherbit.objects.create(\n forecast_provider='Weatherbit',\n # this block creates the Forecast objects for the Weatherbit provider (similar structure as with OpenWeatherMap)\n day_human=day_human,\n day_unix=day_unix,\n city=city,\n countrycode=city_countrycode,\n forecast_pressure_1=city_weather_2['data'][forecast_period]['pres'],\n forecast_humidity_1=city_weather_2['data'][forecast_period]['rh'],\n forecast_max_temp_1=city_weather_2['data'][forecast_period]['max_temp'],\n forecast_min_temp_1=city_weather_2['data'][forecast_period]['min_temp'],\n forecast_temperature_1=city_weather_2['data'][forecast_period]['temp'],\n forecast_description=city_weather_2['data'][forecast_period]['weather']['description'],\n forecasted_day=day_unix + forecast_period,\n forecast_period=forecast_period,\n forecast_icon=city_weather_2['data'][forecast_period]['weather']['icon'],\n name=str(city) + '_' + str(day_unix + forecast_period) + '_fp' + str(forecast_period) + '_Weatherbit'\n # code to make the forecast recognizable (contains city, prediction day, predicted day, forecast period, provider)\n )\n\n Forecast_1_here.objects.create(\n forecast_provider='here.com',\n # this block creates the Forecast objects for the here.com provider (similar structure as with OpenWeatherMap)\n day_human=day_human,\n day_unix=day_unix,\n city=city,\n countrycode=city_countrycode,\n forecast_pressure_1=city_weather_3['dailyForecasts']['forecastLocation']['forecast'][forecast_period][\n 'barometerPressure'],\n forecast_humidity_1=city_weather_3['dailyForecasts']['forecastLocation']['forecast'][forecast_period][\n 'humidity'],\n forecast_max_temp_1=city_weather_3['dailyForecasts']['forecastLocation']['forecast'][forecast_period][\n 'highTemperature'],\n forecast_min_temp_1=city_weather_3['dailyForecasts']['forecastLocation']['forecast'][forecast_period][\n 'lowTemperature'],\n forecast_temperature_1=math.ceil(float(\n city_weather_3['dailyForecasts']['forecastLocation']['forecast'][forecast_period][\n 'lowTemperature']) / 2 + float(\n city_weather_3['dailyForecasts']['forecastLocation']['forecast'][forecast_period][\n 'highTemperature']) / 2),\n forecast_description=city_weather_3['dailyForecasts']['forecastLocation']['forecast'][forecast_period][\n 'description'],\n forecasted_day=day_unix + forecast_period,\n forecast_period=forecast_period,\n name=str(city) + '_' + str(day_unix + forecast_period) + '_fp' + str(forecast_period) + '_here'\n # code to make the forecast recognizable (contains city, prediction day, predicted day, forecast period, provider)\n )\n\n Forecast_1_WWO.objects.create(\n forecast_provider='WorldWeatherOnline',\n # this block creates the Forecast 
objects for the WorldWeatherOnline provider (similar structure as with OpenWeatherMap)\n day_human=day_human,\n day_unix=day_unix,\n city=city,\n countrycode=city_countrycode,\n forecast_pressure_1=city_weather_4[\"data\"][\"weather\"][forecast_period][\"hourly\"][0][\"pressure\"],\n forecast_humidity_1=city_weather_4[\"data\"][\"weather\"][forecast_period][\"hourly\"][0][\"humidity\"],\n forecast_max_temp_1=city_weather_4[\"data\"][\"weather\"][forecast_period][\"maxtempC\"],\n forecast_min_temp_1=city_weather_4[\"data\"][\"weather\"][forecast_period][\"mintempC\"],\n forecast_temperature_1=math.ceil(\n float(city_weather_4[\"data\"][\"weather\"][forecast_period][\"maxtempC\"]) / 2 + float(\n city_weather_4[\"data\"][\"weather\"][forecast_period][\"mintempC\"]) / 2),\n forecast_description=city_weather_4[\"data\"][\"weather\"][forecast_period][\"hourly\"][0][\"weatherDesc\"][0][\n \"value\"],\n forecasted_day=day_unix + forecast_period,\n forecast_period=forecast_period,\n forecast_icon=city_weather_4[\"data\"][\"weather\"][forecast_period][\"hourly\"][0][\"weatherIconUrl\"][0][\n \"value\"],\n name=str(city) + '_' + str(day_unix + forecast_period) + '_fp' + str(\n forecast_period) + '_WorldWeatherOnline',\n # code to make the forecast recognizable (contains city, prediction day, predicted day, forecast period, provider)\n )\n\n context = {'weather_data_current': weather_data, 'form': form}\n return context # submits output to template\n","sub_path":"hello/processing3.py","file_name":"processing3.py","file_ext":"py","file_size_in_byte":10675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"183876398","text":"import logging\n\nfrom .processors import JsonProcessor\nfrom . import Config\nfrom .stores import IncidentFileStore, RawStore, ProcessedFileStore\nfrom .routes.push import PushReceiver\n\nimport json\nfrom bos_incidents.exceptions import DuplicateIncidentException\n\nimport threading\nimport time\nfrom strict_rfc3339 import InvalidRFC3339Error\n\nfrom . 
import utils\nfrom .utils import slugify\nfrom datetime import timedelta\n\nfrom bos_incidents.format import string_to_incident\nfrom bos_incidents import factory\n\nfrom dataproxy.utils import CommonFormat\n\n\nincidents_storage = factory.get_incident_storage()\n\n\ndef _send_to_witness(processor, incident, targets=None):\n try:\n initial_delay = Config.get(\"subscriptions\",\n \"delay_before_initial_sending_in_seconds\",\n incident[\"call\"],\n 0)\n\n if initial_delay > 0:\n logging.getLogger(__name__).info(\"Incident \" + incident[\"unique_string\"] + \": Waiting before sending \" + incident[\"call\"])\n time.sleep(initial_delay)\n logging.getLogger(__name__).info(\"Incident \" + incident[\"unique_string\"] + \": Sending result now\")\n\n PushReceiver.subscribed_witnesses_status = processor.send_to_witness(\n incident,\n targets=targets\n )\n received_witnesses = len([key for key, value in PushReceiver.subscribed_witnesses_status.items() if value == \"ok\"])\n logging.getLogger(__name__).debug(\"Incident \" + incident[\"unique_string\"] + \": Successfully sent to \" + str(received_witnesses) + \" witnesses\")\n return received_witnesses\n except Exception as e:\n logging.getLogger(__name__).info(\"Incident \" + incident[\"unique_string\"] + \": PUSH to witness failed, continueing anyways, exception below\")\n logging.getLogger(__name__).exception(e)\n\n\ndef _send_list_to_witness(processor, incident_list, targets=None, async_queue=True):\n for incident in incident_list:\n logging.getLogger(__name__).info(\"Trigger sending \" + incident[\"unique_string\"])\n\n if async_queue:\n # send to witnesses\n thr = threading.Thread(target=_send_to_witness,\n args=(processor, incident, targets,))\n thr.start() # we dont care when it finishes\n else:\n _send_to_witness(processor, incident, targets=targets)\n\n\ndef process_content(provider_name,\n processor,\n processed_store,\n incident_store,\n file_content,\n file_ending,\n restrict_witness_group=None, # deprecated\n async_queue=True,\n target=None):\n file_name = None\n\n if restrict_witness_group is None and target is not None:\n restrict_witness_group = target\n\n # before storing, check if its worth processing\n is_interesting = file_content is not None\n if is_interesting and processor:\n is_interesting = processor.source_of_interest(\n file_content\n )\n incidents = []\n do_not_send_to_witness = True\n if is_interesting:\n # store found file again\n file_name = processed_store.save(\n provider_name,\n file_content,\n file_ext=file_ending)\n try:\n # process content (should be asynchronous)\n if processor:\n for incident in processor.process(file_content):\n logging.getLogger(__name__ + \"_\" + provider_name).debug(\"Postprocessing \" + incident[\"unique_string\"])\n incident[\"provider_info\"][\"source_file\"] = file_name\n incidents.append(incident)\n # only send if its a new incident\n logging.getLogger(__name__ + \"_\" + provider_name).debug(\" ... exists\")\n do_not_send_to_witness = incident_store.exists(\n provider_name,\n file_ext=\".json\",\n file_name=incident[\"unique_string\"])\n\n if not do_not_send_to_witness:\n logging.getLogger(__name__ + \"_\" + provider_name).debug(\" ... save in incidents folder\")\n # save locally\n incident_file = incident_store.save(\n provider_name,\n json.dumps(incident),\n file_ext=\".json\",\n file_name=incident[\"unique_string\"])\n try:\n logging.getLogger(__name__ + \"_\" + provider_name).debug(\" ... 
save in incidents database\")\n incidents_storage.insert_incident(incident)\n except DuplicateIncidentException:\n pass\n except Exception as e:\n logging.getLogger(__name__ + \"_\" + provider_name).info(provider_name + \": INSERT INTO stats failed, continueing anyways, incident file is \" + incident_file + \", exception below\")\n logging.getLogger(__name__ + \"_\" + provider_name).exception(e)\n incident.pop(\"_id\", None)\n try:\n logging.getLogger(__name__ + \"_\" + provider_name).debug(\" ... sending to witnesses (\" + str(restrict_witness_group) + \", async_queue=\" + str(async_queue) + \")\")\n if async_queue:\n # send to witnesses\n thr = threading.Thread(target=_send_to_witness,\n args=(processor, incident, _find_targets(restrict_witness_group),))\n thr.start() # we dont care when it finishes\n else:\n _send_to_witness(processor, incident, targets=_find_targets(restrict_witness_group))\n except Exception as e:\n logging.getLogger(__name__ + \"_\" + provider_name).info(provider_name + \": PUSH to witness failed, continueing anyways, incident file is \" + incident_file + \", exception below\")\n logging.getLogger(__name__ + \"_\" + provider_name).exception(e)\n except Exception as e:\n logging.getLogger(__name__ + \"_\" + provider_name).info(provider_name + \": Processing failed, continueing anyways. Source file is \" + file_name + \", exception below\")\n logging.getLogger(__name__ + \"_\" + provider_name).exception(e)\n\n return {\n \"file_name\": file_name,\n \"amount_incidents\": len(incidents),\n \"incidents\": incidents,\n \"do_not_send_to_witness\": do_not_send_to_witness,\n \"is_interesting\": is_interesting\n }\n\n\ndef _find_targets(target):\n matched = []\n for witness in Config.get(\"subscriptions\", \"witnesses\"):\n if target is not None:\n if target == witness.get(\"group\", None):\n matched.append(witness)\n elif target == witness[\"url\"] or target == witness.get(\"name\", None):\n matched.append(witness)\n else:\n matched.append(witness)\n return matched\n\n\ndef replay(restrict_witness_group=None,\n providers=None,\n received=None,\n processor=None,\n name_filter=None,\n incidents=None,\n async_execution=None,\n async_queue=None,\n only_report=None,\n target=None):\n if name_filter is None and (incidents is None or incidents == []):\n report = {\"name_filter\": \"Name filter must not be empty\"}\n return report\n if async_execution is None:\n async_execution = False\n if only_report is None:\n only_report = False\n\n logging.getLogger(__name__).info(\"Replay: Collecting configuration ...\")\n\n replay_stats = {}\n replay_stats[\"async_execution\"] = async_execution\n replay_stats[\"target\"] = target\n\n if providers is None:\n providers = list(Config.get(\"providers\").keys())\n if type(providers) == str:\n providers = [providers]\n\n replay_stats[\"providers\"] = providers\n\n if processor is None:\n processor = JsonProcessor()\n\n replay_stats[\"processor\"] = processor.__class__.__name__\n\n if restrict_witness_group is not None:\n target = restrict_witness_group\n\n matched_targets = _find_targets(target)\n replay_stats[\"matched_targets\"] = len(matched_targets)\n if len(matched_targets) == 0:\n logging.getLogger(__name__).info(\"Replay: No matched witnesses found for target \" + target)\n return replay_stats\n\n if incidents is None:\n incidents = []\n\n if type(name_filter) == str:\n name_filter = name_filter.split(\",\")\n\n if name_filter is not None:\n offset_left = 3\n offset_right = 3\n match_date = None\n for tmp in name_filter:\n tmp = 
slugify(tmp)\n try:\n match_date = utils.string_to_date(tmp[0:20])\n break\n except InvalidRFC3339Error:\n pass\n try:\n match_date = utils.string_to_date(tmp[0:8])\n break\n except InvalidRFC3339Error:\n pass\n try:\n match_date = utils.string_to_date(tmp[0:10])\n break\n except InvalidRFC3339Error:\n pass\n if \"create\" in name_filter:\n offset_left = 28\n if match_date and received is None:\n received = []\n for i in range(-offset_left, offset_right):\n _date = utils.date_to_string(match_date + timedelta(days=i))\n received.append(_date[0:4] + _date[5:7] + _date[8:10])\n\n folder_filter = []\n for provider in providers:\n folder_filter.append(provider)\n if received is None:\n received = [\"20181\", \"2019\"]\n if type(received) == str:\n received = [received]\n for tmp in received:\n folder_filter.append(tmp)\n\n replay_stats[\"folder_filter\"] = folder_filter\n replay_stats[\"name_filter\"] = name_filter\n\n logging.getLogger(__name__).info(\"Replay: Finding all incidents in file dump with configuration \" + str(replay_stats))\n for incident in processor.process_generic(\n folder=\"dump/d_incidents\",\n folder_filter=folder_filter,\n name_filter=name_filter):\n incidents.append(incident)\n\n if len(received) == 2:\n logging.getLogger(__name__).info(\"Replay: Querying local database for incidents\")\n regex_filter = \".*\".join(name_filter) + \".*\"\n # Only prepend \".*\" if there expected to be anything beforehand\n if not regex_filter.startswith(\"201\"):\n # cover all years 2010-2029\n regex_filter = \".*\" + regex_filter\n\n try:\n #if len(received) == 1:\n # if len(received[0]) == 8:\n # _from = datetime(received[0][0:4], received[0][4:6], received[0][6:8], 0, 0, tzinfo=tzutc())\n # _till = datetime(received[0][0:4], received[0][4:6], received[0][6:8], 23, 59, tzinfo=tzutc()) \n # elif len(received[0]) == 6:\n # _from = datetime(received[0][0:4], received[0][4:6], 1, 0, 0, tzinfo=tzutc())\n # _till = datetime(received[0][0:4], received[0][4:6], 28, 23, 59, tzinfo=tzutc())\n #else: \n # _from = None\n for incident in incidents_storage.get_incidents(\n dict(\n unique_string={\"$regex\": regex_filter, \"$options\": \"i\"}#,\n #timestamp={\"$lt\": float(_till.timestamp()), \"$gt\": float(_from.timestamp())}\n )\n ):\n # don't add duplicates\n if incident[\"provider_info\"][\"name\"] + \"-\" + incident[\"unique_string\"] not in [x[\"provider_info\"][\"name\"] + \"-\" + x[\"unique_string\"] for x in incidents]:\n incidents.append(incident)\n except Exception as e:\n logging.getLogger(__name__).warning(\"MongoDB not reachable, continueing anyways\" + str(e))\n pass\n\n else:\n if type(incidents) == str:\n incidents = [incidents]\n if type(incidents) == list and len(incidents) > 0 and type(incidents[0]) == str:\n manufactured = []\n for item in incidents:\n for provider in providers:\n manufactured.append(string_to_incident(item, provider_info=provider))\n incidents = manufactured\n\n replay_stats[\"amount_incidents\"] = len(incidents)\n incident_ids = []\n for incident in incidents:\n incident_ids.append(incident[\"unique_string\"])\n\n if replay_stats[\"amount_incidents\"] == 1:\n replay_stats[\"incidents\"] = incidents\n else:\n replay_stats[\"incidents\"] = incident_ids\n\n logging.getLogger(__name__).info(\"Found \" + str(len(incident_ids)) + \" incidents.\")\n\n if not only_report:\n sorted_list = sorted(incidents, key=lambda k: k['provider_info']['pushed'])\n logging.getLogger(__name__).info(\"Replay: Sorted \" + str(len(sorted_list)) + \" incidents ...\")\n\n if 
async_execution:\n            # send to witnesses\n            thr = threading.Thread(target=_send_list_to_witness,\n                                   args=(processor, sorted_list, matched_targets, async_queue))\n            thr.start()  # we don't care when it finishes\n            replay_stats[\"incidents_sent\"] = True\n        else:\n            number = _send_list_to_witness(processor,\n                                           sorted_list,\n                                           targets=matched_targets,\n                                           async_queue=async_queue)\n            replay_stats[\"incidents_sent\"] = number\n    else:\n        replay_stats[\"incidents_sent\"] = False\n    return replay_stats\n","sub_path":"dataproxy/implementations.py","file_name":"implementations.py","file_ext":"py","file_size_in_byte":14288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"495634573","text":"#Leetcode 22\n\n#Solution 1: depth-first search, DFS (backtracking)\n#Branch only while there are still left and right parentheses available to use;\n#for the left branch, only check whether any left parentheses remain;\n#the right branch is additionally constrained by the left one: branch only when the number of remaining right parentheses is strictly greater than the number of remaining left ones;\n#collect the result once the remaining counts on both sides reach 0\n\n#left, right are the numbers of unused parentheses; space O(2^n)?, time O(2^n)\nclass Solution:\n    def generateParenthesis(self, n: int) -> List[str]:\n        res = []\n        s = ''\n        def dfs(s, left, right):\n            if left == 0 and right == 0:\n                res.append(s)\n            if left > right:\n                return\n            if left > 0:\n                dfs(s + '(', left - 1, right)\n            if right > 0:\n                dfs(s + ')', left, right - 1)\n        dfs(s, n, n)\n        return res\n\n\n#left, right are the numbers of parentheses already used; space O(2^n)?, time O(2^n)\nclass Solution:\n    def generateParenthesis(self, n: int) -> List[str]:\n        res = []\n        s = ''\n        def dfs(s, left, right, n):\n            if left == n and right == n:\n                res.append(s)\n                return\n            if left < right:\n                return\n            if left < n:\n                dfs(s + '(', left + 1, right, n)\n            if right < n:\n                dfs(s + ')', left, right + 1, n)\n        dfs(s, 0, 0, n)\n        return res\n","sub_path":"Week_02/22-括号生成.py","file_name":"22-括号生成.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"25599759","text":"import requests\nimport json\nimport pandas as pd\n\n\"\"\"\nUse the requests library to scrape an arbitrary Zhihu topic\n\n--Topic: After listening to President Xi Jinping's 2021 New Year address, how do you feel? What do you expect for your own 2021?\n\nScrape the content of the top 15 answers\n(or all of the answers, if you are familiar with the front end) and save the content to a local file.\n\"\"\"\n\ndef html_data(myurl):\n    header = {'user-agent': \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36\"\n              }\n    try:\n        response = requests.get(myurl, headers=header)\n        response.raise_for_status()\n        return response.text\n    except requests.HTTPError as e:\n        print(e)\n        print(\"HTTPError\")\n    except requests.RequestException as e:\n        print(e)\n    except:\n        print(\"Unknown Error!\")\n\ndef parse_data(html):\n    json_data = json.loads(html)['data']\n    comments = []\n    try:\n        for i in json_data:\n            comment = []\n            comment.append(i['author']['name'])    # author name\n            comment.append(i['excerpt'])    # answer content\n            comments.append(comment)\n            # print(comment)\n        return comments\n    except Exception as e:\n        print(comment)\n        print(e)\ndef save_data(comments):\n    filename = \"spider.txt\"\n    dataframe = pd.DataFrame(comments)\n    dataframe.to_csv(filename, mode='a', index=False, sep=':', header=False)\n    # exit(0)\n\ndef main():
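\n    # The answers endpoint below is paginated: limit=5 returns five answers per request, the offset parameter advances through them, and 'totals' from the first response bounds the loop.\n    myurl = 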
\"https://www.zhihu.com/api/v4/questions/437329650/answers?include=data%5B%2A%5D.is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse_reason%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2Ccontent%2Ceditable_content%2Cattachment%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2Ccreated_time%2Cupdated_time%2Creview_info%2Crelevant_info%2Cquestion%2Cexcerpt%2Cis_labeled%2Cpaid_info%2Cpaid_info_content%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%2Cis_recognized%3Bdata%5B%2A%5D.mark_infos%5B%2A%5D.url%3Bdata%5B%2A%5D.author.follower_count%2Cbadge%5B%2A%5D.topics%3Bdata%5B%2A%5D.settings.table_of_content.enabled&limit=5&offset=30&platform=desktop&sort_by=default\" \n html = html_data(myurl)\n totals = json.loads(html)['paging']['totals'] \n print(totals)\n print('---'*10) \n page = 0 \n while(page < totals):\n myurl = \"https://www.zhihu.com/api/v4/questions/437329650/answers?include=data%5B%2A%5D.is_normal%2Cadmin_closed_comment%2Creward_info%2Cis_collapsed%2Cannotation_action%2Cannotation_detail%2Ccollapse_reason%2Cis_sticky%2Ccollapsed_by%2Csuggest_edit%2Ccomment_count%2Ccan_comment%2Ccontent%2Ceditable_content%2Cattachment%2Cvoteup_count%2Creshipment_settings%2Ccomment_permission%2Ccreated_time%2Cupdated_time%2Creview_info%2Crelevant_info%2Cquestion%2Cexcerpt%2Cis_labeled%2Cpaid_info%2Cpaid_info_content%2Crelationship.is_authorized%2Cis_author%2Cvoting%2Cis_thanked%2Cis_nothelp%2Cis_recognized%3Bdata%5B%2A%5D.mark_infos%5B%2A%5D.url%3Bdata%5B%2A%5D.author.follower_count%2Cbadge%5B%2A%5D.topics%3Bdata%5B%2A%5D.settings.table_of_content.enabled&limit=5&offset=\"+ str(page) +\"&platform=desktop&sort_by=default\"\n html = html_data(myurl)\n comments = parse_data(html)\n save_data(comments) \n print(page)\n page += 5 \nif __name__ == '__main__':\n main()\n print(\"完成���\")","sub_path":"week02/zhihu_assigment_01.py","file_name":"zhihu_assigment_01.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"221397520","text":"import hashlib\nfrom datetime import timedelta\nfrom urllib.parse import unquote\n\nimport pytz\nfrom django.http import (\n Http404, HttpResponse, HttpResponseNotModified, HttpResponsePermanentRedirect,\n)\nfrom django.urls import resolve, reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.timezone import now\nfrom django.views.generic import TemplateView\nfrom django_context_decorator import context\n\nfrom pretalx.common.mixins.views import EventPermissionRequired\nfrom pretalx.common.signals import register_data_exporters\n\n\nclass ScheduleDataView(EventPermissionRequired, TemplateView):\n permission_required = 'agenda.view_schedule'\n\n @cached_property\n def version(self):\n if 'version' in self.kwargs:\n return unquote(self.kwargs['version'])\n return None\n\n def dispatch(self, request, *args, **kwargs):\n if 'version' in request.GET:\n kwargs['version'] = request.GET['version']\n return HttpResponsePermanentRedirect(\n reverse(\n f'agenda:versioned-{request.resolver_match.url_name}',\n args=args,\n kwargs=kwargs,\n )\n )\n return super().dispatch(request, *args, **kwargs)\n\n def get_object(self):\n if self.version:\n return self.request.event.schedules.filter(\n version__iexact=self.version\n ).first()\n if self.request.event.current_schedule:\n return self.request.event.current_schedule\n return None\n\n @context\n 
@cached_property\n def schedule(self):\n return self.get_object()\n\n def get_context_data(self, **kwargs):\n result = super().get_context_data(**kwargs)\n schedule = self.schedule\n event = self.request.event\n\n if not schedule and self.version:\n result['version'] = self.version\n result['error'] = f'Schedule \"{self.version}\" not found.'\n return result\n if not schedule:\n result['error'] = 'Schedule not found.'\n return result\n result['schedules'] = event.schedules.filter(\n published__isnull=False\n ).values_list('version')\n return result\n\n\nclass ExporterView(ScheduleDataView):\n def get_exporter(self, request):\n url = resolve(request.path_info)\n\n if url.url_name == 'export':\n exporter = url.kwargs.get('name') or unquote(\n self.request.GET.get('exporter')\n )\n else:\n exporter = url.url_name\n\n exporter = exporter.lstrip('export.')\n responses = register_data_exporters.send(request.event)\n for _, response in responses:\n ex = response(request.event)\n if ex.identifier == exporter:\n if ex.public or request.is_orga:\n return ex\n return None\n\n def get(self, request, *args, **kwargs):\n exporter = self.get_exporter(request)\n if not exporter:\n raise Http404()\n try:\n exporter.schedule = self.schedule\n exporter.is_orga = getattr(self.request, 'is_orga', False)\n file_name, file_type, data = exporter.render()\n etag = hashlib.sha1(str(data).encode()).hexdigest()\n if 'HTTP_IF_NONE_MATCH' in request.META:\n if request.META['HTTP_IF_NONE_MATCH'] == etag:\n return HttpResponseNotModified()\n resp = HttpResponse(data, content_type=file_type)\n resp['ETag'] = etag\n if file_type not in ['application/json', 'text/xml']:\n resp['Content-Disposition'] = f'attachment; filename=\"{file_name}\"'\n return resp\n except Exception:\n raise Http404()\n\n\nclass ScheduleView(ScheduleDataView):\n template_name = 'agenda/schedule.html'\n permission_required = 'agenda.view_schedule'\n\n def get_object(self):\n if self.version == 'wip' and self.request.user.has_perm(\n 'orga.view_schedule', self.request.event\n ):\n return self.request.event.wip_schedule\n return super().get_object()\n\n def get_context_data(self, **kwargs):\n from pretalx.schedule.exporters import ScheduleData\n\n result = super().get_context_data(**kwargs)\n result['exporters'] = list(\n exporter(self.request.event)\n for _, exporter in register_data_exporters.send(self.request.event)\n )\n timezone = pytz.timezone(self.request.event.timezone)\n if 'schedule' not in result:\n return result\n\n result['data'] = ScheduleData(\n event=self.request.event, schedule=self.schedule\n ).data\n result['search'] = self.request.GET.get('q', '').lower()\n max_rooms = 0\n for date in result['data']:\n if date.get('first_start') and date.get('last_end'):\n start = (\n date.get('first_start')\n .astimezone(timezone)\n .replace(second=0, minute=0)\n )\n end = date.get('last_end').astimezone(timezone)\n date['height'] = int((end - start).total_seconds() / 60 * 2)\n date['hours'] = []\n step = start\n while step < end:\n date['hours'].append(step.strftime('%H:%M'))\n step += timedelta(hours=1)\n max_rooms = max(max_rooms, len(date['rooms']))\n for room in date['rooms']:\n for talk in room.get('talks', []):\n talk.top = int(\n (talk.start.astimezone(timezone) - start).total_seconds()\n / 60\n * 2\n )\n talk.height = int(talk.duration * 2)\n talk.is_active = talk.start <= now() <= talk.real_end\n result['max_rooms'] = max_rooms\n return result\n\n\nclass ChangelogView(EventPermissionRequired, TemplateView):\n template_name = 
'agenda/changelog.html'\n    permission_required = 'agenda.view_schedule'\n","sub_path":"src/pretalx/agenda/views/schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":6188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"27109550","text":"class Solution:\n    def snakesAndLadders(self, board: List[List[int]]) -> int:\n        n = len(board)\n        def label_to_position(label):\n            r, c = divmod(label-1, n) \n            # r = (label - 1) // n\n            # c = (label - 1) % n\n            if r % 2 == 0:\n                return n-1-r, c\n            else:\n                return n-1-r, n-1-c\n        \n        seen = set()\n        queue = collections.deque()\n        queue.append((1, 0))\n        while queue:\n            label, step = queue.popleft()\n            r, c = label_to_position(label)\n            if board[r][c] != -1:\n                label = board[r][c]\n            if label == n*n:\n                return step\n            for x in range(1, 7):\n                new_label = label + x\n                if new_label <= n*n and new_label not in seen:\n                    seen.add(new_label)\n                    queue.append((new_label, step+1))\n        return -1\n","sub_path":"909-snakes-and-ladders/909-snakes-and-ladders.py","file_name":"909-snakes-and-ladders.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"430363876","text":"import os\nimport glob\nimport pandas as pd\nimport xml.etree.ElementTree as ET\ndef xml_to_csv(path, type_directory):\n    xml_list = []\n    path_core = path[path.find('data/{}'.format(type_directory)):None] + \"/\"\n    for xml_file in glob.glob(path + '/*.xml'):\n        tree = ET.parse(xml_file)\n        root = tree.getroot()\n        for member in root.findall('object'):\n            value = (path_core.replace('\\\\', '/') + root.find('filename').text,\n                     int(root.find('size')[0].text),\n                     int(root.find('size')[1].text),\n                     member[0].text,\n                     int(member[4][0].text),\n                     int(member[4][1].text),\n                     int(member[4][2].text),\n                     int(member[4][3].text)\n                     )\n            xml_list.append(value)\n    column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']\n    xml_df = pd.DataFrame(xml_list, columns=column_name)\n    return xml_df\n\n# https://stackoverflow.com/questions/17530542/how-to-add-pandas-data-to-an-existing-csv-file\ndef save_sml_to_csv(df, csv_path, sep=\",\"):\n    if not os.path.isfile(csv_path):\n        df.to_csv(csv_path, mode='a', index=False, sep=sep)\n    elif len(df.columns) != len(pd.read_csv(csv_path, nrows=1, sep=sep).columns):
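\n        # The header row is written only when the CSV is first created; every later append is checked against the existing file so a schema mismatch fails loudly instead of silently corrupting the output.\n        raise Exception(\"Columns do not match!! Dataframe has \" + str(len(df.columns)) + \" columns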
CSV file has \" + str(len(pd.read_csv(csv_path, nrows=1, sep=sep).columns)) + \" columns.\")\n elif not (df.columns == pd.read_csv(csv_path, nrows=1, sep=sep).columns).all():\n raise Exception(\"Columns and column order of dataframe and csv file do not match!!\")\n else:\n df.to_csv(csv_path, mode='a', index=False, sep=sep, header=False)\n\n\ndef main(directory_list):\n for Image_cat in directory_list:\n xml_file_name = 'data/{}_labels.csv'.format(Image_cat)\n image_path = os.path.join(os.getcwd(), 'data/{}'.format(Image_cat))\n if os.path.exists(xml_file_name):\n os.remove(xml_file_name)\n print(image_path)\n for i in os.walk(image_path):\n if i[2]:\n print(\"Folder: \")\n print(i[0])\n print(\"with images:\")\n print(i[2])\n print()\n xml_df = xml_to_csv(i[0], Image_cat)\n save_sml_to_csv(xml_df, xml_file_name)\n\n print('Successfully converted xml to csv.')\n\nmain(['train','test'])","sub_path":"model/archive/data/xml_to_csv.py","file_name":"xml_to_csv.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"93716824","text":"#Import modules\nimport nipype\nfrom os.path import join as opj\nimport os\nimport json\nfrom nipype.interfaces.fsl import (BET, ExtractROI, FAST, FLIRT, ImageMaths,\n MCFLIRT, SliceTimer, Threshold)\nfrom nipype.interfaces.spm import Smooth\nfrom nipype.interfaces.utility import IdentityInterface, Merge\nfrom nipype.interfaces.io import SelectFiles, DataSink, FreeSurferSource\nfrom nipype.algorithms.rapidart import ArtifactDetect\nfrom nipype.pipeline.engine import Workflow, Node, MapNode\nfrom nipype.interfaces.ants import Registration, ApplyTransforms\nfrom nipype.interfaces.fsl import Info\nfrom nipype.interfaces.freesurfer import FSCommand, MRIConvert, BBRegister\nfrom nipype.interfaces.c3 import C3dAffineTool\nfrom nipype.interfaces.matlab import MatlabCommand\n\nfrom os.path import join as opj\nfrom nipype.interfaces.spm import Normalize12\nfrom nipype.interfaces.utility import IdentityInterface\nfrom nipype.interfaces.io import SelectFiles, DataSink\nfrom nipype.algorithms.misc import Gunzip\nfrom nipype.pipeline.engine import Workflow, Node, MapNode\n\nfrom nilearn import image, plotting\nimport numpy as np\nimport pylab as plt\nimport numpy as np\n\nMatlabCommand.set_default_paths('/home/messina/spm12')\nMatlabCommand.set_default_matlab_cmd(\"matlab -nodesktop -nosplash\")\n\n# location of template in form of a tissue probability map to normalize to\ntemplate = '/home/messina/spm12/tpm/TPM.nii'\n\nbasedir = '/home/messina/ada_project/workingdir/PPMI/'\n\nexperiment_dir = '/home/messina/ada_project'\n\n#experiment_dir = '/Users/elisabettamessina/Desktop/ADA/ada2017hw/project/ExampleFolder2'\nworking_dir = 'workingdir'\noutput_dir = 'output_folder'\ninput_dir_1st = 'output_folder' \n# defining the subjec list \n# Specify variables\n\n# list of subject identifiers\nsubject_list = []\nfor fn in os.listdir(basedir):\n if fn[0]=='3' or fn[0]=='4':\n subject_list.append(fn)\n# list of session identifiers\ntask_list = ['rs']\n\n# Smoothing widths to apply\nfwhm = [4, 8]\n\n# TR of functional images\nTR = 2.4 \n\n# Isometric resample of functional images to voxel size (in mm)\niso_size = 4\n\n# ExtractROI - skip dummy scans\nextract = Node(ExtractROI(t_min=4, t_size=-1),\n output_type='NIFTI',\n name=\"extract\")\n\n# MCFLIRT - motion correction\nmcflirt = Node(MCFLIRT(mean_vol=True,\n save_plots=True,\n output_type='NIFTI'),\n name=\"mcflirt\")\n\n# SliceTimer - correct 
for slice wise acquisition\nslicetimer = Node(SliceTimer(index_dir=False,\n interleaved=True,\n output_type='NIFTI',\n time_repetition=TR),\n name=\"slicetimer\")\n\n# Smooth - image smoothing\nsmooth = Node(Smooth(), name=\"smooth\")\nsmooth.iterables = (\"fwhm\", fwhm)\n\n# Gunzip - unzip the structural image\ngunzip_struct = Node(Gunzip(), name=\"gunzip_struct\")\n\n# Gunzip - unzip the contrast image\ngunzip_con = MapNode(Gunzip(), name=\"gunzip_con\",\n iterfield=['in_file'])\n\n# Normalize - normalizes functional and structural images to the MNI template\nnormalize = Node(Normalize12(jobtype='estwrite',\n tpm=template,\n write_voxel_sizes=[1, 1, 1]),\n name=\"normalize\")\n\n# Artifact Detection - determines outliers in functional images\nart = Node(ArtifactDetect(norm_threshold=2,\n zintensity_threshold=3,\n mask_type='spm_global',\n parameter_source='FSL',\n use_differences=[True, False],\n plot_type='svg'),\n name=\"art\")\n\n# BET - Skullstrip anatomical Image\nbet_anat = Node(BET(frac=0.5,\n robust=True,\n output_type='NIFTI_GZ'),\n name=\"bet_anat\")\n\n# FAST - Image Segmentation\nsegmentation = Node(FAST(output_type='NIFTI_GZ'),\n name=\"segmentation\")\n\n# Select WM segmentation file from segmentation output\ndef get_wm(files):\n return files[-1]\n\n# Threshold - Threshold WM probability image\nthreshold = Node(Threshold(thresh=0.5,\n args='-bin',\n output_type='NIFTI_GZ'),\n name=\"threshold\")\n\n# FLIRT - pre-alignment of functional images to anatomical images\ncoreg_pre = Node(FLIRT(dof=6, output_type='NIFTI_GZ'),\n name=\"coreg_pre\")\n\n# FLIRT - coregistration of functional images to anatomical images with BBR\ncoreg_bbr = Node(FLIRT(dof=6,\n cost='bbr',\n schedule=opj(os.getenv('FSLDIR'),\n 'etc/flirtsch/bbr.sch'),\n output_type='NIFTI_GZ'),\n name=\"coreg_bbr\")\n\n# Apply coregistration warp to functional images\napplywarp = Node(FLIRT(interp='spline',\n apply_isoxfm=iso_size,\n output_type='NIFTI'),\n name=\"applywarp\")\n\n# Apply coregistration warp to mean file\napplywarp_mean = Node(FLIRT(interp='spline',\n apply_isoxfm=iso_size,\n output_type='NIFTI_GZ'),\n name=\"applywarp_mean\")\n\n\n# Create a coregistration workflow\ncoregwf = Workflow(name='coregwf')\ncoregwf.base_dir = opj(experiment_dir, working_dir)\n\n# Connect all components of the coregistration workflow\ncoregwf.connect([(bet_anat, segmentation, [('out_file', 'in_files')]),\n (segmentation, threshold, [(('partial_volume_files', get_wm),\n 'in_file')]),\n (bet_anat, coreg_pre, [('out_file', 'reference')]),\n (threshold, coreg_bbr, [('out_file', 'wm_seg')]),\n (coreg_pre, coreg_bbr, [('out_matrix_file', 'in_matrix_file')]),\n (coreg_bbr, applywarp, [('out_matrix_file', 'in_matrix_file')]),\n (bet_anat, applywarp, [('out_file', 'reference')]),\n (coreg_bbr, applywarp_mean, [('out_matrix_file', 'in_matrix_file')]),\n (bet_anat, applywarp_mean, [('out_file', 'reference')]),\n ])\n\n# Infosource - a function free node to iterate over the list of subject names\ninfosource = Node(IdentityInterface(fields=['subject_id', 'task_name']),\n name=\"infosource\")\ninfosource.iterables = [('subject_id', subject_list),\n ('task_name', task_list)]\n\n# SelectFiles - to grab the data (alternativ to DataGrabber)\nanat_file = opj('{subject_id}', 'ep2d_RESTING_STATE', 'anat', 'anat.nii')\nfunc_file = opj('{subject_id}', 'ep2d_RESTING_STATE', 'func',\n 'rest.nii')\n\ntemplates = {'anat': anat_file,\n 'func': func_file}\nselectfiles = Node(SelectFiles(templates,\n base_directory= experiment_dir + 
'/workingdir/PPMI/'),\n name=\"selectfiles\")\n\n# Datasink - creates output folder for important outputs\ndatasink = Node(DataSink(base_directory=experiment_dir,\n container=output_dir),\n name=\"datasink\")\n\n## Use the following DataSink output substitutions\nsubstitutions = [('_subject_id_', ''),\n ('_task_name_', '/task-'),\n ('_fwhm_', 'fwhm-'),\n ('_roi', ''),\n ('_mcf', ''),\n ('_st', ''),\n ('_flirt', ''),\n ('.nii_mean_reg', '_mean'),\n ('.nii.par', '.par'),\n ]\nsubjFolders = [('fwhm-%s/' % f, 'fwhm-%s_' % f) for f in fwhm]\nsubstitutions.extend(subjFolders)\ndatasink.inputs.substitutions = substitutions\n\n# Create a preprocessing workflow\npreproc = Workflow(name='preproc')\npreproc.base_dir = opj(experiment_dir, working_dir)\n\n# Connect all components of the preprocessing workflow\npreproc.connect([(infosource, selectfiles, [('subject_id', 'subject_id'),\n ('task_name', 'task_name')]),\n (selectfiles, extract, [('func', 'in_file')]),\n (extract, mcflirt, [('roi_file', 'in_file')]),\n (mcflirt, slicetimer, [('out_file', 'in_file')]),\n\n (selectfiles, coregwf, [('anat', 'bet_anat.in_file'),\n ('anat', 'coreg_bbr.reference')]),\n (mcflirt, coregwf, [('mean_img', 'coreg_pre.in_file'),\n ('mean_img', 'coreg_bbr.in_file'),\n ('mean_img', 'applywarp_mean.in_file')]),\n (slicetimer, coregwf, [('slice_time_corrected_file', 'applywarp.in_file')]),\n \n (coregwf, smooth, [('applywarp.out_file', 'in_files')]),\n\n (mcflirt, datasink, [('par_file', 'preproc.@par')]),\n (smooth, datasink, [('smoothed_files', 'preproc.@smooth')]),\n (coregwf, datasink, [('applywarp_mean.out_file', 'preproc.@mean')]),\n\n (coregwf, art, [('applywarp.out_file', 'realigned_files')]),\n (mcflirt, art, [('par_file', 'realignment_parameters')]),\n\n (coregwf, datasink, [('coreg_bbr.out_matrix_file', 'preproc.@mat_file'),\n ('bet_anat.out_file', 'preproc.@brain')]),\n (art, datasink, [('outlier_files', 'preproc.@outlier_files'),\n ('plot_files', 'preproc.@plot_files')]),\n ])\n\n\n# DEBUG MODE:\npreproc.config['execution'] = {'stop_on_first_rerun': 'False',\n 'hash_method': 'timestamp'}\nfrom nipype import config, logging\nconfig.enable_debug_mode()\nlogging.update_logging(config)\n\npreproc.run('MultiProc', plugin_args={'n_procs': 4})\n\n # Gunzip - unzip the structural image\ngunzip_struct = Node(Gunzip(), name=\"gunzip_struct\")\n\n# Gunzip - unzip the contrast image\ngunzip_con = MapNode(Gunzip(), name=\"gunzip_con\",\n iterfield=['in_file'])\n\n# Normalize - normalizes functional and structural images to the MNI template\nnormalize = Node(Normalize12(jobtype='estwrite',\n tpm=template,\n write_voxel_sizes=[1, 1, 1]),\n name=\"normalize\")\n\n# Specify Normalization-Workflow & Connect Nodes\nnormflow = Workflow(name='normflow')\nnormflow.base_dir = opj(experiment_dir, working_dir)\n\n# Connect up ANTS normalization components\nnormflow.connect([(gunzip_struct, normalize, [('out_file', 'image_to_align')]),\n (gunzip_con, normalize, [('out_file', 'apply_to_files')]),\n ])\n\n# Infosource - a function free node to iterate over the list of subject names\ninfosource = Node(IdentityInterface(fields=['subject_id']),\n name=\"infosource\")\ninfosource.iterables = [('subject_id', subject_list)]\n\n# SelectFiles - to grab the data (alternativ to DataGrabber)\nanat_file = opj(working_dir, 'PPMI', '{subject_id}', 'ep2d_RESTING_STATE', 'anat', 'zipped', 'anat.nii.gz')\ncon_file = opj(output_dir, 'preproc', '{subject_id}', 'task-rs',\n 'rest_mean.nii.gz')\n\n\ntemplates = {'anat': anat_file,\n 'con': con_file,\n 
}\nselectfiles = Node(SelectFiles(templates,\n base_directory=experiment_dir),\n name=\"selectfiles\")\n\n# Datasink - creates output folder for important outputs\ndatasink = Node(DataSink(base_directory=experiment_dir,\n container=output_dir),\n name=\"datasink\")\n\n# Use the following DataSink output substitutions\nsubstitutions = [('_subject_id_', '')]\ndatasink.inputs.substitutions = substitutions\n\n# Connect SelectFiles and DataSink to the workflow\nnormflow.connect([(infosource, selectfiles, [('subject_id', 'subject_id')]),\n (selectfiles, gunzip_struct, [('anat', 'in_file')]),\n (selectfiles, gunzip_con, [('con', 'in_file')]),\n (normalize, datasink, [('normalized_files',\n 'normalized.@files'),\n ('normalized_image',\n 'normalized.@image'),\n ('deformation_field',\n 'normalized.@field'),\n ]),\n ])\n\n# DEBUG MODE:\nnormflow.config['execution'] = {'stop_on_first_rerun': 'False',\n 'hash_method': 'timestamp'}\nfrom nipype import config, logging\nconfig.enable_debug_mode()\nlogging.update_logging(config)\n\nnormflow.run('MultiProc', plugin_args={'n_procs': 8})\n","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":12689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"563853231","text":"import tensorflow as tf\nimport numpy as np\nimport os\nimport cv2\nimport face_recognition\nimport align.detect_face\n\n\nFPS = 30\n\nDB_ROOT_DIR = \"./Face_Database\"\n\ndetector = cv2.CascadeClassifier('haar_alt.xml')\n\n# Create MTCNN\nwith tf.Graph().as_default():\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)\n sess_ = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n with sess_.as_default():\n pnet, rnet, onet = align.detect_face.create_mtcnn(sess_, None)\n\n\ndef detect_face(img, face_size=1, detector_id=0):\n \"\"\"\n Detect face in image, 3 detector are available.\n :param img: Image of user\n :param face_size: Minimum detected face size\n :param detector_id: ID of face detector\n :return:\n \"\"\"\n if detector_id == 0:\n # MTCNN\n face_locations = []\n bounding_boxes, _ = align.detect_face.detect_face(img=img,\n minsize=20 * face_size,\n pnet=pnet, rnet=rnet, onet=onet,\n threshold=[0.6, 0.7, 0.7],\n factor=0.709)\n for i in range(bounding_boxes.shape[0]): # Convert the face boxes to the format required by face_recognition\n face_locations.append((int(bounding_boxes[i, 1]),\n int(bounding_boxes[i, 2]),\n int(bounding_boxes[i, 3]),\n int(bounding_boxes[i, 0])))\n elif detector_id == 1:\n # OpenCV haar cascade classifier\n face_locations = []\n face_positions = detector.detectMultiScale(image=img,\n scaleFactor=1.1,\n minNeighbors=3,\n minSize=(20 * face_size, 20 * face_size),\n maxSize=(240, 240))\n for face_position in face_positions: # Convert the face boxes to the format required by face_recognition\n face_locations.append((int(face_position[1]),\n int(face_position[0] + face_position[3]),\n int(face_position[1] + face_position[2]),\n int(face_position[0])))\n elif detector_id == 2:\n # HOG detector in face_recognition API\n face_locations = face_recognition.face_locations(img=img,\n number_of_times_to_upsample=1,\n model=\"hog\",)\n else:\n print('Invalid Detector ID!')\n return\n\n return face_locations\n\n\ndef realtime_recognition(group_name, face_size=1, track_interval=200, recognition_interval=2000, scale_factor=1, tolerance=0.6):\n \"\"\"\n Run realtime face recognition.\n :param group_name: Name of user group\n :param 
face_size: Minimum detected face size\n :param track_interval: Face detect interval/ms\n :param recognition_interval: Face recognize interval/ms\n :param scale_factor: Image processing zoom factor\n :param tolerance: Face recognition threshold\n :return: None\n \"\"\"\n # Load face database\n db_path = os.path.join(DB_ROOT_DIR, group_name)\n\n known_face_encodings = [] # Features in database\n known_face_names = [] # Names in database\n\n for person in os.listdir(db_path):\n known_face_encodings.append(np.load(os.path.join(db_path, person)))\n known_face_names.append(person.replace(\".\", \"_\").split(\"_\")[0])\n\n face_locations = [] # Container for detected face boxes\n face_names = [] # Container for recognized face names\n\n cap = cv2.VideoCapture(0)\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)\n ret, frame = cap.read()\n\n timer = 0 # Frame skip timer\n while ret:\n timer += 1\n ret, frame = cap.read()\n\n # Face detection\n if timer % (track_interval * FPS // 1000) == 0:\n small_frame = cv2.resize(frame, (0, 0), fx=1 / scale_factor, fy=1 / scale_factor)\n rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)\n # gray_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2GRAY)\n face_locations = detect_face(rgb_small_frame, face_size)\n\n # Face recognition\n if timer % (recognition_interval * FPS // 1000) == 0 and face_locations != []:\n rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n \n # Encode faces into 128-dimensional features\n face_encodings = face_recognition.face_encodings(face_image=rgb_frame,\n known_face_locations=face_locations * scale_factor,\n num_jitters=1\n )\n face_names.clear()\n for face_encoding in face_encodings:\n matches = face_recognition.compare_faces(known_face_encodings=known_face_encodings,\n face_encoding_to_check=face_encoding,\n tolerance=tolerance\n )\n name = \"Unknown\"\n face_distances = face_recognition.face_distance(face_encodings=known_face_encodings,\n face_to_compare=face_encoding\n )\n\n best_match_index = np.argmin(face_distances)\n if matches[best_match_index]:\n name = known_face_names[int(best_match_index)]\n face_names.append(name)\n\n # Draw face boxes and names\n for (top, right, bottom, left), name in zip(face_locations, face_names):\n\n top *= scale_factor\n right *= scale_factor\n bottom *= scale_factor\n left *= scale_factor\n\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 1)\n cv2.rectangle(frame, (left, bottom), (right, int(bottom + (bottom - top) * 0.25)), (0, 0, 255), cv2.FILLED)\n cv2.putText(frame, name, (left, int(bottom + (bottom - top) * 0.24)),\n cv2.FONT_HERSHEY_DUPLEX, (right - left) / 120, (255, 255, 255), 1)\n\n cv2.imshow('camera', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cap.release()\n cv2.destroyWindow(\"camera\")\n\n\nif __name__ == '__main__':\n realtime_recognition(group_name=\"test_group\", face_size=1, track_interval=200, recognition_interval=1000, scale_factor=1, tolerance=0.6)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"403134649","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport satlas as sat\nfrom satlasaddon import RateModelPolar, RateModelDecay\nimport scipy.constants as csts\n\nsat.set(['standard'])#, 'online'])\nC = csts.physical_constants['speed of light in vacuum'][0]\nEV_TO_MHZ = csts.physical_constants['electron volt-hertz relationship'][0] * 
1e-6\nAMU_TO_KG = csts.physical_constants['atomic mass unit-kilogram relationship'][0]\nEV_TO_J = csts.physical_constants['electron volt-joule relationship'][0]\n\nefficiency = 1.0 / 50000\narea = 0.8 #cm^2\nlaser_power_pump = 10 #mW\nlaser_power_optical = 100 #mW\nlaser_intensity_pump = laser_power_pump / area * 10 # W/m^2\nlaser_intensity_optical = laser_power_optical / area * 10 # W/m^2\nlaser_mode = 1\n\nfield = 6 * 10 ** (-4) # T\n\nNa23_I = 1.5\nNa23_mu = 2.2176556\nNa23_Q = 1.1045\nNa26_I = 3.0\nNa26_mu = 2.851\nNa26_Q = -0.0053\nB_factor = Na26_Q / Na23_Q\nA_factor = (Na26_mu / Na26_I) / (Na23_mu / Na23_I)\n\nI = 3.0\n\nL_D2 = [2.0, 2.0]\n\nJ_D2 = [1.5, 0.5]\n\nABC_D2 = [[18.534 * A_factor, 2.724 * B_factor, 0], [885.8130644 * A_factor, 0, 0]]\n\nlevel_energies_D2 = [2.102297159, 0]\n\ncentroids_D2 = [0]\n\nA_array_D2 = np.array([[0, 6.1542e6],\n [0, 0]])\n\nJ = J_D2\nL = L_D2\nABC = ABC_D2\nlevel_energies = level_energies_D2\ncentroids = centroids_D2\nA_array = A_array_D2\n\nf_st = np.abs(np.diff(level_energies_D2)) * EV_TO_MHZ\n\nmass = 25.992633\nmass = mass * AMU_TO_KG\n\ne = 50 * 10**3\nl_pumping = 1.9\nl_optical_detection = 0.2\nenergy = e * EV_TO_J\nvelocity = np.sqrt(2 * energy / mass)\ntof_pump = l_pumping / velocity\ntof_optical = l_optical_detection / velocity\n\nargs = (I, J, L, ABC, centroids, level_energies, A_array)\nkwargs = {'laser_intensity': [laser_intensity_pump],\n 'scale': 100,\n 'laser_mode': [laser_mode],\n 'shape': 'lorentzian',\n 'fwhmL': None,\n 'interaction_time': tof_pump,\n 'field': field}\n\n# Next, calculate with just 1 laser\nmodel_pump = RateModelPolar(*args, **kwargs)\nkwargs['interaction_time'] = tof_optical\nkwargs['laser_intensity'] = [laser_intensity_optical]\nkwargs['scale'] = 1 * efficiency\nmodel_optical = RateModelDecay(*args, **kwargs)\n\nstep = 0.5\nextra = 200\nfreqs = np.arange(model_pump.locations.min() - extra, model_pump.locations.max() + extra + step, step)\nx_plot = freqs - f_st\n\nresp_pump = model_pump(freqs)\nresp_optical = model_optical(freqs)\n\nfig, ax = plt.subplots(2, 1, sharex=True)\nax[0].plot(x_plot, resp_pump)\nax[1].plot(x_plot, resp_optical)\n\nax[0].set_ylabel('Polarisation [%]')\nax[1].set_ylabel('Decay rate/atom [Hz]')\nf_st = f_st[0] * 1e6\nax[1].set_xlabel('Frequency offset from {:.5f} nm [MHz]'.format(C/f_st * 1e9))\n\n# fig.savefig('Na_simulation.pdf', bbox_inches='tight')\n\nplt.show()\n","sub_path":"polarization/test_na.py","file_name":"test_na.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"95470818","text":"from __future__ import absolute_import\n\nimport json\nimport os\nimport threading\nfrom datetime import datetime\n\nfrom .log import logger\nfrom .agent_const import AGENT_DEFAULT_HOST, AGENT_DEFAULT_PORT\nfrom .fsm import Fsm\nfrom .sensor import Sensor\nimport instana.singletons\n\ntry:\n import urllib.request as urllib2\nexcept ImportError:\n import urllib2\n\n\nclass From(object):\n pid = \"\"\n agentUuid = \"\"\n\n def __init__(self, **kwds):\n self.__dict__.update(kwds)\n\n\nclass Head(urllib2.Request):\n\n def get_method(self):\n return \"HEAD\"\n\n\nclass Put(urllib2.Request):\n\n def get_method(self):\n return \"PUT\"\n\n\nclass Agent(object):\n sensor = None\n host = AGENT_DEFAULT_HOST\n port = AGENT_DEFAULT_PORT\n fsm = None\n from_ = From()\n last_seen = None\n last_fork_check = None\n _boot_pid = os.getpid()\n\n def __init__(self):\n logger.debug(\"initializing agent\")\n self.sensor = 
Sensor(self)\n self.fsm = Fsm(self)\n\n def to_json(self, o):\n try:\n return json.dumps(o, default=lambda o: {k.lower(): v for k, v in o.__dict__.items()},\n sort_keys=False, separators=(',', ':')).encode()\n except Exception as e:\n logger.info(\"to_json: \", e, o)\n\n def is_timed_out(self):\n if self.last_seen and self.can_send:\n diff = datetime.now() - self.last_seen\n if diff.seconds > 60:\n return True\n return False\n\n def can_send(self):\n # Watch for pid change in the case of ; if so, re-announce\n current_pid = os.getpid()\n if self._boot_pid != current_pid:\n self._boot_pid = current_pid\n self.handle_fork()\n return False\n\n if (self.fsm.fsm.current == \"good2go\"):\n return True\n\n return False\n\n def head(self, url):\n return self.request(url, \"HEAD\", None)\n\n def request(self, url, method, o):\n return self.full_request_response(url, method, o, False, \"\")\n\n def request_response(self, url, method, o):\n return self.full_request_response(url, method, o, True, \"\")\n\n def request_header(self, url, method, header):\n return self.full_request_response(url, method, None, False, header)\n\n def full_request_response(self, url, method, o, body, header):\n b = None\n h = None\n try:\n if method == \"HEAD\":\n request = Head(url)\n elif method == \"GET\":\n request = urllib2.Request(url)\n elif method == \"PUT\":\n request = Put(url, self.to_json(o))\n request.add_header(\"Content-Type\", \"application/json\")\n else:\n request = urllib2.Request(url, self.to_json(o))\n request.add_header(\"Content-Type\", \"application/json\")\n\n response = urllib2.urlopen(request, timeout=2)\n\n if not response:\n self.reset()\n else:\n if response.getcode() < 200 or response.getcode() >= 300:\n logger.error(\"Request returned erroneous code\", response.getcode())\n if self.can_send():\n self.reset()\n else:\n self.last_seen = datetime.now()\n if body:\n b = response.read()\n\n if header:\n h = response.info().get(header)\n\n if method == \"HEAD\":\n b = True\n except Exception as e:\n # No need to show the initial 404s or timeouts. 
The agent\n # should handle those correctly.\n if not (type(e) is urllib2.HTTPError and e.code == 404):\n logger.debug(\"%s: full_request_response: %s\" %\n (threading.current_thread().name, str(e)))\n\n return (b, h)\n\n def make_url(self, prefix):\n return self.make_host_url(self.host, prefix)\n\n def make_host_url(self, host, prefix):\n port = self.sensor.options.agent_port\n if port == 0:\n port = AGENT_DEFAULT_PORT\n\n return self.make_full_url(host, port, prefix)\n\n def make_full_url(self, host, port, prefix):\n s = \"http://%s:%s%s\" % (host, str(port), prefix)\n if self.from_.pid != 0:\n s = \"%s%s\" % (s, self.from_.pid)\n\n return s\n\n def set_from(self, json_string):\n if type(json_string) is bytes:\n raw_json = json_string.decode(\"UTF-8\")\n else:\n raw_json = json_string\n\n self.from_ = From(**json.loads(raw_json))\n\n def reset(self):\n self.last_seen = None\n self.from_ = From()\n self.fsm.reset()\n\n def handle_fork(self):\n self.reset()\n self.sensor.handle_fork()\n instana.singletons.tracer.handle_fork()\n","sub_path":"instana/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":4855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"517462596","text":"\"\"\"\nAuthor: Aggelos Kolaitis \nLast Update: 2019/10/17\nDescription: Refreshes local database of MaaS machines\n\n# Usage:\n$ mjt_refresh\n\n# Notes:\n* This process may take 2-3 minutes for big MaaS installations\n\"\"\"\n\nfrom maasjuju_toolkit.config import Config\nfrom maasjuju_toolkit.util import (\n session, MaaSError, db, MaaSCache, exit_with_error)\n\n\ndef is_virtual_machine(power_parameters):\n address = power_parameters.get('power_address', None)\n\n return (address is None\n or any(x in address for x in ['virsh', 'ssh', 'qemu']))\n\n\ndef refresh_db():\n \"\"\"gets data from server and update cache\"\"\"\n print('Getting information from MaaS.')\n\n # Retrieves list of machines\n try:\n s = session()\n machines = s.Machines.read()\n powers = s.Machines.power_parameters()\n\n except MaaSError as e:\n exit_with_error(f'Could not GET machines: {e}')\n\n # Updates database info\n new_data = []\n for m in machines:\n try:\n system_id = m.get('system_id', 'UNKNOWN')\n m_power = powers[system_id]\n\n if is_virtual_machine(m_power):\n print(f'[{system_id}] [{m.get(\"hostname\")}]'\n f' [INFO] skipping, virtual machine')\n continue\n\n new_data.append(dict(\n power_address=m_power.get('power_address', ''),\n power_user=m_power.get('power_user', ''),\n power_pass=m_power.get('power_pass', ''),\n\n fqdn=m['fqdn'],\n domain=m['domain']['name'],\n hostname=m['hostname'],\n system_id=system_id,\n ip_addresses=', '.join(m['ip_addresses']),\n cpus=m['cpu_count'],\n ram=m['memory'] // 1024,\n tags=','.join(m['tag_names'])\n ))\n\n except KeyError as e:\n print(f'[{system_id}] [ERROR] Missing information: {e}')\n\n # Adds new data to the database\n print(f'Updating the database: \"{Config.sqlite_db}\"')\n MaaSCache.insert(new_data).on_conflict_replace().execute()\n db.commit()\n\n print('Done.')\n\n\ndef main():\n refresh_db()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"maasjuju_toolkit/refresh.py","file_name":"refresh.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"71563645","text":"#!usr/bin/env python\n#-*- coding:utf-8 _*-\n\"\"\"\n@author:alvin\n@file: lagouSp.py\n@time: 2019/01/15\n\"\"\"\nimport requests\nimport time\nimport 
csv\nimport unittest\nimport ddt\nurl='https://www.lagou.com/jobs/positionAjax.json?px=default&city=%E5%8C%97%E4%BA%AC&needAddtionalResult=false'\ndef getHeader():\n    headers={\n    'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8',\n    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0',\n    'Cookie':'JSESSIONID=ABAAABAAAIAACBIC8FB3664604EDAA48AB8BC31604D1068; user_trace_token=20190115202537-a9dc9212-18c0-11e9-b310-525400f775ce; LGUID=20190115202537-a9dc9461-18c0-11e9-b310-525400f775ce; index_location_city=%E5%8C%97%E4%BA%AC; PRE_UTM=; PRE_HOST=; PRE_SITE=https%3A%2F%2Fwww.lagou.com%2Fjobs%2Flist_%25E8%2587%25AA%25E5%258A%25A8%25E5%258C%2596%25E6%25B5%258B%25E8%25AF%2595%3FlabelWords%3D%26fromSearch%3Dtrue%26suginput%3D; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2Fjobs%2F5426367.html; _gat=1; TG-TRACK-CODE=index_search; SEARCH_ID=3697c26520374c3583cfde00d9031423; LGSID=20190115220858-19ec7382-18cf-11e9-b66e-5254005c3644; LGRID=20190115222850-e06e0c40-18d1-11e9-b66e-5254005c3644; _ga=GA1.2.1734538929.1547555137; _gid=GA1.2.1993790329.1547555137; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1547555137; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1547562530',\n    'Referer':'https://www.lagou.com/jobs/list_%E8%87%AA%E5%8A%A8%E5%8C%96%E6%B5%8B%E8%AF%95?labelWords=&fromSearch=true&suginput='\n    }\n    return headers\n\ndef laGou(page=5):\n    positions=[]\n    time.sleep(10)\n    r = requests.post(url=url,\n                      headers=getHeader(),\n                      data={'first':False,\n                            'pn':page,\n                            'kd':\"自动化测试工程师\"\n                            }\n                      )\n    # print(r.text)\n    # 15 items per page; the API offset starts at 0\n    for i in range(15):\n        companyFullName=r.json()['content']['positionResult']['result'][i]['companyFullName']\n        district=r.json()['content']['positionResult']['result'][i]['district']\n        stationname=r.json()['content']['positionResult']['result'][i]['stationname']\n        education=r.json()['content']['positionResult']['result'][i]['education']\n        workYear=r.json()['content']['positionResult']['result'][i]['workYear']\n        positionName=r.json()['content']['positionResult']['result'][i]['positionName']\n        salary=r.json()['content']['positionResult']['result'][i]['salary']\n        skillLables=r.json()['content']['positionResult']['result'][i]['skillLables']\n\n        position={\n            '公司全称':companyFullName,\n            '地区':district,\n            '地点':stationname,\n            '教育':education,\n            '年限': workYear,\n            '职位': positionName,\n            '薪资': salary,\n            '技能': skillLables,\n        }\n        positions.append(position)\n\n    for item in positions:\n        print(item)\n    return positions\n\n\ndef writeCSV():\n\n    header_csv={'公司全称','地区','地点','教育','年限','职位','薪资','技能'}\n    with open('lagou.csv', 'w', newline='', encoding='gbk') as fh:\n        writer_header = csv.DictWriter(fh, header_csv)\n        writer_header.writeheader()\n\n    for item in range(1, 8):\n        positions=laGou(page=item)\n        print(positions)\n        # newline='' avoids blank rows; gbk matches the OS default encoding\n        with open('lagou.csv','a',newline='',encoding='gbk') as f:\n            writer_data = csv.DictWriter(f, header_csv)\n            writer_data.writerows(positions)\n# @ddt.ddt\nclass LaGou(unittest.TestCase):\n\n    @ddt.data((1,),(2,))\n    @ddt.unpack\n    def test_lg(self,page=2):\n        positions_list=[]\n        r= requests.post(\n            url=url,\n            headers=getHeader(),\n            data={'first': False,\n                  'pn': page,\n                  'kd': \"自动化测试工程师\"\n                  }\n        )\n        print(r.json()['success'])\n        print(r.json()['content']['positionResult']['result'][0]['salary'])\n        self.assertEqual(r.json()['success'],True)\n\n\n\nif __name__ == \"__main__\":\n
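\n    # verbosity=2 prints each ddt-generated test case and its result as it runs.\n    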
unittest.main(verbosity=2)","sub_path":"wuyaAPI/day12/DDTlagou.py","file_name":"DDTlagou.py","file_ext":"py","file_size_in_byte":4264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"33423335","text":"def get_forest(d):\n ret = {}\n for y, line in enumerate(d):\n for x, location in enumerate(line):\n ret[(x, y)] = location\n return ret\n\n\ndef pprint(f):\n maxx = max(f.keys(), key=lambda x: x[0])[0]\n maxy = max(f.keys(), key=lambda y: y[1])[1]\n for y in range(maxy + 1):\n row = ''\n for x in range(maxx + 1):\n row += f[(x, y)]\n print(row)\n\n\ndef get_adjacent(x, y, oldf):\n ret = []\n ret.append(oldf.get((x - 1, y - 1)))\n ret.append(oldf.get((x, y - 1)))\n ret.append(oldf.get((x + 1, y - 1)))\n ret.append(oldf.get((x - 1, y)))\n ret.append(oldf.get((x + 1, y)))\n ret.append(oldf.get((x - 1, y + 1)))\n ret.append(oldf.get((x, y + 1)))\n ret.append(oldf.get((x + 1, y + 1)))\n return [item for item in ret if item is not None]\n\n\ndef mutate(x, y, value, oldf):\n adj = get_adjacent(x, y, oldf)\n # open ground -> tree if has 3 or more adjacent trees\n if value == open_ground:\n if len([i for i in adj if i == tree]) >= 3:\n return tree\n return value\n # trees -> lumberyard if 3 or more adjacent acres are lumberyards\n elif value == tree:\n if len([i for i in adj if i == lumberyard]) >= 3:\n return lumberyard\n return value\n # lumberyard remains a lumberyard if adjacent to a tree and a lumberyard.\n elif value == lumberyard:\n if lumberyard in adj and tree in adj:\n return lumberyard\n return open_ground\n\n\ndef part1(oldf):\n for i in range(1, 11):\n newf = {}\n for p, v in oldf.items():\n newf[(p[0], p[1])] = mutate(p[0], p[1], v, oldf)\n print('minute', i)\n pprint(newf)\n print()\n oldf = newf\n wooded = len([x for x in oldf.values() if x == tree])\n lumbered = len([x for x in oldf.values() if x == lumberyard])\n # part1 answer wooded 1025 lumberyards 622 answer 637550 correct first time.\n print('wooded', wooded, 'lumberyards', lumbered, 'answer', wooded * lumbered)\n\n\ndef part2(oldf):\n # 1000000000 iterations\n for i in range(1, 1000000001):\n newf = {}\n for p, v in oldf.items():\n newf[(p[0], p[1])] = mutate(p[0], p[1], v, oldf)\n oldf = newf\n if i % 1000 == 0:\n print('answer for', i)\n wooded = len([x for x in oldf.values() if x == tree])\n lumbered = len([x for x in oldf.values() if x == lumberyard])\n # part2 answer\n print('wooded', wooded, 'lumberyards', lumbered, 'answer', wooded * lumbered)\n\n wooded = len([x for x in oldf.values() if x == tree])\n lumbered = len([x for x in oldf.values() if x == lumberyard])\n # part2 answer\n print('wooded', wooded, 'lumberyards', lumbered, 'answer', wooded * lumbered)\n # answers cycle through 7 numbers as per the part2out.txt file.\n # 1000000000 / 7 is even, so take the first number in the cycle: 201465.\n\n\nwith open('data.txt', 'r') as f:\n data = f.read().split('\\n')\nopen_ground = '.'\ntree = '|'\nlumberyard = '#'\nforest = get_forest(data)\npprint(forest)\npart2(forest)\n","sub_path":"day18/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"222827097","text":"# Create your views here.\n\nfrom django.shortcuts import get_object_or_404, render_to_response, redirect\nfrom django.template import RequestContext\nfrom django.http import HttpResponse\nfrom django.core.paginator import Paginator, InvalidPage, EmptyPage\nimport 
minimalist.system\nimport minimalist.spell\nfrom minimalist.app.models import Query, Suggestion, Parsing\nimport json\n\ndef start(request):\n return render_to_response(\"start.html\", RequestContext(request))\n\ndef home(request):\n return render_to_response( \"home.html\", RequestContext(request))\n\ndef experiment_home(request):\n return render_to_response( \"home-tablet.html\", RequestContext(request))\n\ndef query(request):\n if request.method == \"POST\":\n s = minimalist.system.System()\n input = request.POST.get(\"ingredients\")\n parser = minimalist.spell.Parser()\n\n if input != \"e.g. olive oil, vinegar, salt\":\n L = parser.parse(input)\n else:\n L = []\n\n alternates = [(tuple[1].lower(), tuple[2]) for word, ok, tuple in L if ok]\n inventory = [tuple[0].lower() for word, ok, tuple in L if ok]\n unknown = [word for word, ok, tuple in L if not ok]\n\n suggestions = s.suggest(inventory)\n query = Query(\n text=input[:400],\n resolved=json.dumps(alternates),\n uncategorised=json.dumps(unknown),\n count=len(suggestions),\n )\n\n query.save()\n\n for word, ok, tuple in L:\n p = ok and Parsing( query=query, word=word, ok=ok,\n resolved_class=tuple[0], resolved_word=tuple[1], distance=tuple[2]\n ) or Parsing(query=query, word=word, ok=ok)\n p.save()\n\n for s in suggestions:\n data = Suggestion(\n query=query,\n name=s.name,\n description=s.description,\n ingredients = s.ingredients,\n html = s.html\n )\n data.save()\n s.url = data.get_absolute_url()\n \n return redirect(query)\n\n elif request.method == \"GET\":\n\n input = request.GET.get(\"ingredients\")\n query_id = request.GET.get(\"query\")\n query=get_object_or_404(Query, pk=int(query_id))\n\n suggestions = query.suggestions.all()\n\n if suggestions:\n paginator = Paginator(suggestions, 3)\n # Make sure page request is an int. 
If not, deliver first page.\n try:\n page = int(request.GET.get('page', '1'))\n except ValueError:\n page = 1\n\n # If page request (9999) is out of range, deliver last page of results.\n try:\n suggestions = paginator.page(page)\n except (EmptyPage, InvalidPage):\n suggestions = paginator.page(paginator.num_pages)\n\n context = {\n \"suggestions\" : suggestions,\n \"ingredients\" : query.text,\n \"alternates\" : json.loads(query.resolved),\n \"unknown\" : json.loads(query.uncategorised),\n \"query_url\" : query.get_absolute_url(),\n }\n\n return render_to_response(\"suggest.html\", RequestContext(request, context))\n\ndef suggestion(request, suggestion_id):\n s = get_object_or_404(Suggestion, pk=suggestion_id)\n\n context = {\n 'dishtype': s.name,\n 'ingredients': ', '.join(s.ingredients),\n 'instructions' : s.html,\n 'back' : s.query.get_absolute_url(),\n }\n\n return render_to_response(\"instructions.html\", RequestContext(request, context))\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"154388807","text":"\"\"\"\na=eval(input(\"Enter a value: \"));b=eval(input(\"Enter b value: \"))\nresult=a+b\nprint(f'The addition of {a} and {b} is: {result}')\nprint(type(a))\n\"\"\"\nmy_name= 'Nevergive'\nmy_number = 2024288812\nmy_fav_scripting=\"\"\"\npython is is this is\nis not my home\n\"\"\"\n### Special characters\n\"\"\"\nprint(my_fav_scripting)\n\nmy_str=\"\"\nmy_new_str= \" \"\nprint(f'{bool(my_str)}')\n\"\"\"\n#### slicing of a string\n\"\"\"\nmy_last_name = \"'My last name is koji'\nprint(my_last_name[:5])\n\"\"\"\n# ## Find lenght of a string\n# my_fav_scripting = 'koji bello'\n# # print(len(my_fav_scripting))\n# print(f'This lenght of my name {my_fav_scripting} is: \\n{len(my_fav_scripting)}')\n\n# my_index_posistion= 'JJTech inc is the place to be'\n# my_str='Brontech is the place to be'\n# space_str= \" \"\n# my_str3=my_index_posistion+space_str+my_str\n# print(my_str3)\n\nmy_index_posistion= 'JJTech inc is the place to be,'\nmy_str='Brontech is the place to be'\n# space_str= \" \"\nmy_str3=my_index_posistion+\" \"+my_str+ \" \"+\"Help me!\"\nprint(my_str3)\n","sub_path":"input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"10942428","text":"import pdb\nimport os\nimport json\nimport importlib\nimport sys\nimport argparse\n\n\nfrom horey.h_logger import get_logger\nfrom horey.common_utils.common_utils import CommonUtils\nlogger = get_logger()\n\n\nclass ConfigurationPolicy:\n \"\"\"\n Base class to handle Configuration Policies.\n Should be capt as simple as possible as it should run in various environments.\n ENVIRON_ATTRIBUTE_PREFIX - prefix used to specify which environ values should be used to init configuration.\n \"\"\"\n\n ENVIRON_ATTRIBUTE_PREFIX = \"horey_\"\n\n def __init__(self):\n \"\"\"\n Save all the files used to configure - used for prints in\n \"\"\"\n self._configuration_file_full_path = []\n \n @property\n def configuration_file_full_path(self):\n if len(self._configuration_file_full_path) > 0:\n return self._configuration_file_full_path[-1]\n return None\n \n @configuration_file_full_path.setter\n def configuration_file_full_path(self, value):\n if not os.path.exists(value):\n raise ValueError(f\"File does not exist: {value}\")\n\n self._configuration_file_full_path.append(value)\n\n 
@property\n def configuration_files_history(self):\n return self._configuration_file_full_path\n\n @configuration_files_history.setter\n def configuration_files_history(self, _):\n raise ValueError(\"Readonly property\")\n\n def _set_attribute_value(self, attribute_name, attribute_value):\n if not hasattr(self, f\"_{attribute_name}\"):\n raise ValueError(attribute_name)\n\n setattr(self, attribute_name, attribute_value)\n\n def init_from_command_line(self, parser=None):\n \"\"\"\n Very important notice: expects all values are strings. Attributes with None value - being removed.\n \"\"\"\n\n if parser is None:\n parser = self.generate_parser()\n namespace_arguments = parser.parse_args()\n dict_arguments = vars(namespace_arguments)\n\n dict_arguments = {key: value for key, value in dict_arguments.items() if value is not None}\n\n self.init_from_dictionary(dict_arguments, custom_source_log=\"Init attribute '{}' from command line argument\")\n\n def init_from_dictionary(self, dict_src, custom_source_log=None):\n \"\"\"\n\n :param dict_src:\n :param custom_source_log: Because everything is a dict we will path custom log line to indicate what is the real source of the value.\n :return:\n \"\"\"\n for key, value in dict_src.items():\n if custom_source_log is not None:\n log_line = custom_source_log.format(key)\n else:\n log_line = f\"Init attribute '{key}' from dictionary\"\n logger.info(log_line)\n self._set_attribute_value(key, value)\n\n def init_from_environ(self):\n for key_tmp, value in os.environ.items():\n key = key_tmp.lower()\n if key.startswith(self.ENVIRON_ATTRIBUTE_PREFIX):\n key = key[len(self.ENVIRON_ATTRIBUTE_PREFIX):]\n\n log_line = f\"Init attribute '{key}' from environment variable '{key_tmp}'\"\n logger.info(log_line)\n\n self._set_attribute_value(key, value)\n\n def init_from_file(self):\n if self.configuration_file_full_path is None:\n raise ValueError(\"Configuration file was not set\")\n\n if self.configuration_file_full_path.endswith(\".py\"):\n return self.init_from_python_file()\n\n def init_from_python_file(self):\n config = CommonUtils.load_object_from_module(self.configuration_file_full_path, \"main\")\n self.init_from_dictionary(config.__dict__, custom_source_log=\"Init attribute '{}' from python file: '\" + self.configuration_file_full_path + \"'\")\n\n def init_from_json_file(self):\n with open(self.configuration_file_full_path) as file_handler:\n dict_arguments = json.load(file_handler)\n self.init_from_dictionary(dict_arguments, custom_source_log=\"Init attribute '{}' from json file: '\" + self.configuration_file_full_path + \"'\")\n\n def generate_parser(self):\n \"\"\"\n This function generates a parser based on exposed parameters.\n \"\"\"\n\n \"\"\"\n parse_known_args - if Tr\n \"\"\"\n description = f\"{self.__class__.__name__} autogenerated parser\"\n parser = argparse.ArgumentParser(description=description)\n\n for parameter in self.__dict__:\n if not parameter.startswith(\"_\"):\n continue\n parameter = f\"--{parameter[1:]}\"\n parser.add_argument(parameter, type=str, required=False)\n\n return parser\n","sub_path":"configuration_policy/horey/configuration_policy/configuration_policy.py","file_name":"configuration_policy.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"530877673","text":"from django.test import TestCase\nfrom django.core.files import File\nfrom ssl_parse.models import Certificate\n\n\nclass CertificateTest(TestCase):\n\n def 
test_load_cert(self):\n file = open('1.der', 'rb')\n data = File(file)\n cert = Certificate(\n file_certificate=data,\n name='Test Certificate'\n )\n cert.save()\n print(cert.id)\n","sub_path":"ssl_parse/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"196071747","text":"# -*- encoding: utf-8 -*-\n#\n# Copyright © 2020 Mergify SAS\n# Copyright © 2018 Mehdi Abaakouk \n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport typing\n\nimport daiquiri\nimport voluptuous\n\nfrom mergify_engine import context\nfrom mergify_engine import queue\nfrom mergify_engine.actions import merge_base\nfrom mergify_engine.rules import types\n\n\nif typing.TYPE_CHECKING:\n from mergify_engine import rules\n\nLOG = daiquiri.getLogger(__name__)\n\n\nclass FakePR:\n def __init__(self, key: str, value: typing.Any):\n setattr(self, key, value)\n\n\nclass MergeAction(merge_base.MergeBaseAction):\n\n validator = {\n voluptuous.Required(\"method\", default=\"merge\"): voluptuous.Any(\n \"rebase\", \"merge\", \"squash\"\n ),\n voluptuous.Required(\"rebase_fallback\", default=\"merge\"): voluptuous.Any(\n \"merge\", \"squash\", None\n ),\n voluptuous.Required(\"strict\", default=False): voluptuous.All(\n voluptuous.Any(\n bool, \"smart\", \"smart+fastpath\", \"smart+fasttrack\", \"smart+ordered\"\n ),\n voluptuous.Coerce(merge_base.strict_merge_parameter),\n merge_base.StrictMergeParameter,\n ),\n voluptuous.Required(\"strict_method\", default=\"merge\"): voluptuous.Any(\n \"rebase\", \"merge\"\n ),\n # NOTE(sileht): Alias of update_bot_account, it's now undocumented but we have\n # users that use it so, we have to keep it\n voluptuous.Required(\"bot_account\", default=None): voluptuous.Any(\n None, types.GitHubLogin\n ),\n voluptuous.Required(\"merge_bot_account\", default=None): voluptuous.Any(\n None, types.GitHubLogin\n ),\n voluptuous.Required(\"update_bot_account\", default=None): voluptuous.Any(\n None, types.GitHubLogin\n ),\n voluptuous.Required(\"commit_message\", default=\"default\"): voluptuous.Any(\n \"default\", \"title+body\"\n ),\n voluptuous.Required(\n \"priority\", default=merge_base.PriorityAliases.medium.value\n ): voluptuous.All(\n voluptuous.Any(\"low\", \"medium\", \"high\", int),\n voluptuous.Coerce(merge_base.Priority),\n int,\n voluptuous.Range(min=1, max=10000),\n ),\n }\n\n def _should_be_synced(self, ctxt: context.Context, q: queue.Queue) -> bool:\n if self.config[\"strict\"] is merge_base.StrictMergeParameter.ordered:\n return ctxt.is_behind and q.is_first_pull(ctxt)\n elif self.config[\"strict\"] is merge_base.StrictMergeParameter.fasttrack:\n return ctxt.is_behind\n elif self.config[\"strict\"] is merge_base.StrictMergeParameter.true:\n return ctxt.is_behind\n elif self.config[\"strict\"] is merge_base.StrictMergeParameter.false:\n return False\n else:\n raise RuntimeError(\"Unexpected strict\")\n\n def _should_be_queued(self, ctxt: context.Context, q: 
queue.Queue) -> bool:\n return True\n\n def _should_be_merged(self, ctxt: context.Context, q: queue.Queue) -> bool:\n if self.config[\"strict\"] is merge_base.StrictMergeParameter.ordered:\n return not ctxt.is_behind and q.is_first_pull(ctxt)\n elif self.config[\"strict\"] is merge_base.StrictMergeParameter.fasttrack:\n return not ctxt.is_behind\n elif self.config[\"strict\"] is merge_base.StrictMergeParameter.true:\n return not ctxt.is_behind\n elif self.config[\"strict\"] is merge_base.StrictMergeParameter.false:\n return True\n else:\n raise RuntimeError(\"Unexpected strict\")\n\n def _should_be_cancel(\n self, ctxt: context.Context, rule: \"rules.EvaluatedRule\"\n ) -> bool:\n # It's closed, it's not going to change\n if ctxt.pull[\"state\"] == \"closed\":\n return True\n\n if ctxt.have_been_synchronized():\n return True\n\n need_look_at_checks = []\n for condition in rule.missing_conditions:\n if condition.attribute_name.startswith(\n \"check-\"\n ) or condition.attribute_name.startswith(\"status-\"):\n # TODO(sileht): Just return True here, no need to checks checks anymore,\n # this method is no more used by teh merge queue\n need_look_at_checks.append(condition)\n else:\n # something else does not match anymore\n return True\n\n if need_look_at_checks:\n if not ctxt.checks:\n return False\n\n states = [\n state\n for name, state in ctxt.checks.items()\n for cond in need_look_at_checks\n if cond(FakePR(cond.attribute_name, name))\n ]\n if not states:\n return False\n\n for state in states:\n if state in (\"pending\", None):\n return False\n\n return True\n\n def get_merge_conditions(\n self,\n ctxt: context.Context,\n rule: \"rules.EvaluatedRule\",\n ) -> typing.Tuple[\"rules.RuleConditions\", \"rules.RuleMissingConditions\"]:\n return rule.conditions, rule.missing_conditions\n","sub_path":"mergify_engine/actions/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":5704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"193858255","text":"\nfrom .test_setup import TestSetUp\nfrom rest_framework.test import APIRequestFactory\nfrom rest_framework.test import force_authenticate\nfrom django.contrib.auth.models import User\nfrom bookings.views import UserViewset, BookingsViewset\nfrom bookings.models import Bookings\n\nclass TestUserViews(TestSetUp):\n\n def test_public_user_can_read(self):\n #Let's create an user and a booking to test if an user with admin credidential can read the viewset\n user = User.objects.create(username='testuser', password='userpassword')\n Bookings.objects.create(name='test booking', date ='2021-01-21T14:53:00Z', description='test boooking', user=user)\n factory = APIRequestFactory()\n view = BookingsViewset.as_view({'get': 'list'})\n request = factory.get('/viewset/bookings/')\n response=view(request)\n response.render()\n self.assertEqual(response.status_code, 200)\n\n def test_public_user_cannot_post(self):\n #Here we just try to post a new booking without any user credidentail\n factory = APIRequestFactory()\n view = BookingsViewset.as_view({'get': 'list'})\n request = factory.post('/viewset/bookings/', self.new_booking, format='json')\n response=view(request)\n response.render()\n self.assertEqual(response.status_code, 401)\n\n def test_admin_user_can_post(self):\n factory = APIRequestFactory()\n view = BookingsViewset.as_view({'post': 'create'})\n request = factory.post('/viewset/bookings/', self.new_booking, format='json')\n request.user = self.user\n force_authenticate(request, 
user=self.user)\n response=view(request)\n response.render()\n self.assertEqual(response.status_code, 201)\n\n\n def test_admin_user_can_update(self):\n factory = APIRequestFactory()\n view = BookingsViewset.as_view({'get' : 'list', 'put' : 'update', 'post': 'create'})\n #let's create a booking and then update it\n Bookings.objects.create(name='test booking 2', date ='2021-01-21T14:53:00Z', description='test boooking', user=self.user)\n factory=APIRequestFactory()\n request = factory.put('/viewset/bookings/', self.updated_booking, format='json')\n request.user = self.user\n force_authenticate(request, user=self.user)\n response=view(request, pk=2)\n response.render()\n self.assertEqual(response.status_code, 200)","sub_path":"bsport/bookings/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"96628359","text":"\nimport wx\n\nfrom wx.lib.pubsub import pub as Publisher\n\nimport matplotlib\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas\nfrom mnuPlotToolbar import MyCustomToolbar as NavigationToolbar\nimport matplotlib.pyplot as plt\nimport math\nimport textwrap\nimport numpy\n\n\nclass plotBox(wx.Panel):\n\n\n def _init_coll_boxSizer1_Items(self, parent):\n # generated method, don't edit\n\n parent.AddWindow(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)\n parent.AddWindow(self.toolbar, 0, wx.EXPAND)\n\n\n\n def _init_sizers(self):\n # generated method, don't edit\n self.boxSizer1 = wx.BoxSizer(orient=wx.VERTICAL)\n self._init_coll_boxSizer1_Items(self.boxSizer1)\n self.SetSizer(self.boxSizer1)\n\n\n\n def _init_ctrls(self, prnt):\n #matplotlib.figure.Figure.__init__(self)\n wx.Panel.__init__(self, prnt, -1)\n\n\n Publisher.subscribe(self.monthly, (\"box.Monthly\"))\n Publisher.subscribe(self.yearly, (\"box.Yearly\"))\n Publisher.subscribe(self.seasonaly, (\"box.Seasonal\"))\n Publisher.subscribe(self.overall, (\"box.Overall\"))\n\n\n self.figure = matplotlib.figure.Figure()\n self.plot=self.figure.add_subplot(111)\n self.plot.axis([0, 1, 0, 1])#\n self.plot.set_title(\"No Data To Plot\")\n\n self.canvas = FigCanvas(self, -1, self.figure)\n # Create the navigation toolbar, tied to the canvas\n self.toolbar = NavigationToolbar(self.canvas, True)\n self.toolbar.Realize()\n\n\n #self.canvas.SetCursor(wx.StockCursor(wx.CURSOR_CROSS))\n #self.canvas.SetScrollbar(wx.HORIZONTAL, 0,5, 1000)\n self.SetColor(\"WHITE\")\n self.canvas.SetFont(wx.Font(20, wx.SWISS, wx.NORMAL, wx.NORMAL,\n False, u'Tahoma'))\n self.canvas.draw()\n self._init_sizers()\n\n def Clear(self):\n self.figure.clear()\n\n def GridSize(self, cells):\n rows = 1\n cols = 1\n while rows * cols < cells:\n if rows == cols:\n cols = cols + 1\n else:\n rows = rows + 1\n return rows, cols\n\n def textSize(self, cells):\n wrap = 50\n wrap = wrap-(cells*3)\n text= 20 -cells\n return wrap, text\n\n def Plot(self, seriesPlotInfo):\n self.seriesPlotInfo= seriesPlotInfo\n self.updatePlot()\n\n def updatePlot(self):\n self.Clear()\n count = self.seriesPlotInfo.count()\n rows, cols = self.GridSize(count)\n self.plots=[]\n i=1\n for oneSeries in self.seriesPlotInfo.GetSeriesInfo():\n self.plots.append(self.figure.add_subplot(repr(rows)+repr(cols)+repr(i)))\n\n\n wrap, text = self.textSize(count)\n self.plots[i-1].set_xlabel(\"\\n\".join(textwrap.wrap(oneSeries.BoxWhisker.currinterval.title, wrap)))\n 
self.plots[i-1].set_ylabel(\"\\n\".join(textwrap.wrap(oneSeries.variableName+ \"(\"+oneSeries.variableUnits+\")\", wrap)))\n self.plots[i-1].set_title(\"\\n\".join(textwrap.wrap(oneSeries.siteName+\" \"+oneSeries.variableName, wrap )))\n\n self.canvas.SetFont(wx.Font(text, wx.SWISS, wx.NORMAL, wx.NORMAL,\n False, u'Tahoma'))\n\n med= oneSeries.BoxWhisker.currinterval.medians\n cl= oneSeries.BoxWhisker.currinterval.confint\n mean= oneSeries.BoxWhisker.currinterval.means\n ci = oneSeries.BoxWhisker.currinterval.conflimit\n bp=self.plots[i-1].boxplot(oneSeries.BoxWhisker.currinterval.data, sym = \"-gs\", notch = True, bootstrap = 5000, conf_intervals = cl)\n\n\n # Plot Mean and its confidence interval\n for x in range(len(mean)):\n self.plots[i-1].vlines(x+1, ci[x][0], ci[x][1], color='r', linestyle = \"solid\" )\n self.plots[i-1].scatter([range(1,len(mean)+1)], mean, marker='o', c='r', s= 10)\n\n\n # Plot Median\n self.plots[i-1].scatter([range(1,len(med)+1)], med, marker='s', c=\"k\", s= 10)\n\n\n # Set Colors of the Box Whisker plot\n plt.setp(bp['whiskers'], color = 'k', linestyle = '-')\n plt.setp(bp['medians'], color = 'k', linestyle = '-')\n plt.setp(bp['boxes'], color = 'GREY', linestyle = '-')\n plt.setp(bp['caps'], color = 'k')\n plt.setp(bp['fliers'], markersize = 3.5)\n\n # self.plot.set_ybound(min(data),max(data))\n self.plots[i-1].set_autoscale_on(True)\n self.plots[i-1].set_xticklabels(oneSeries.BoxWhisker.currinterval.xlabels)\n\n i=i+1\n\n\n self.canvas.draw()\n\n\n def SetColor( self, color):\n # \"\"\"Set figure and canvas colours to be the same.\"\"\"\n self.figure.set_facecolor( color )\n self.figure.set_edgecolor( color )\n self.canvas.SetBackgroundColour( color )\n\n\n\n def monthly(self, str):\n # print \"monthly\"\n self.seriesPlotInfo.SetBoxInterval(\"Monthly\")\n self.updatePlot()\n\n def seasonaly(self, str):\n # print\"seasonal\"\n self.seriesPlotInfo.SetBoxInterval(\"Seasonally\")\n self.updatePlot()\n\n def yearly(self, str):\n # print \"yearly\"\n self.seriesPlotInfo.SetBoxInterval(\"Yearly\")\n self.updatePlot()\n\n def overall(self, str):\n # print \"overall\"\n self.seriesPlotInfo.SetBoxInterval(\"Overall\")\n self.updatePlot()\n\n\n\n\n def __init__(self, parent, id, pos, size, style, name):\n self._init_ctrls(parent)","sub_path":"src/Gui/plotBoxWhisker.py","file_name":"plotBoxWhisker.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"654365166","text":"import numpy as np\nfrom tensorflow.keras.datasets import mnist\ndef convert_to_one_hot(y, C):\n return np.eye(C)[y.reshape(-1)].T\n\nsize=[784,30,10]\nnum=2\nweight = [np.random.randn(ch2,ch1)\n for ch1,ch2 in zip(size[:-1], size[1:])]\n # [784,30],[30,10] z=wxx+b [30,1]\nbias = [np.random.rand(s, 1) for s in size[1:]]\n\n\n(train_x, train_y), (test_x, test_y) = mnist.load_data()\ntrain_data = []\ntrain_x = train_x.reshape([60000, 784])\nfor i in range(train_x.shape[0]):\n # print(convert_to_one_hot(train_y[i],10).shape)\n train_data.append([train_x[i]/255, convert_to_one_hot(train_y[i], 10)])\n\ntest_data = []\ntest_x = test_x.reshape([10000, 784])\nfor i in range(10000):\n test_data.append([test_x[i]/255, test_y[i]])\n\n\nx,y=test_data[0]\nprint(x)\nfor b, w in zip(bias, weight):\n # [30,784]@[784,1]->[30,1]+[30,1]=[30,1]\n z = np.dot(w, x) + b\n 
print(z)","sub_path":".history/yjs/test_20210606182625.py","file_name":"test_20210606182625.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"308653929","text":"import uuid\nimport botocore\nfrom boto3 import resource\nfrom flask import current_app\n\nFILE_LOCATION_STRUCTURE = 'service-{}-notify/{}.csv'\n\n\ndef s3upload(service_id, filedata, region):\n s3 = resource('s3')\n bucket_name = current_app.config['CSV_UPLOAD_BUCKET_NAME']\n contents = filedata['data']\n\n exists = True\n try:\n s3.meta.client.head_bucket(\n Bucket=bucket_name)\n except botocore.exceptions.ClientError as e:\n error_code = int(e.response['Error']['Code'])\n if error_code == 404:\n exists = False\n else:\n current_app.logger.error(\n \"Unable to create s3 bucket {}\".format(bucket_name))\n raise e\n\n if not exists:\n s3.create_bucket(Bucket=bucket_name,\n CreateBucketConfiguration={'LocationConstraint': region})\n\n upload_id = str(uuid.uuid4())\n upload_file_name = FILE_LOCATION_STRUCTURE.format(service_id, upload_id)\n key = s3.Object(bucket_name, upload_file_name)\n key.put(Body=contents, ServerSideEncryption='AES256')\n\n return upload_id\n\n\ndef s3download(service_id, upload_id):\n contents = ''\n try:\n s3 = resource('s3')\n bucket_name = current_app.config['CSV_UPLOAD_BUCKET_NAME']\n upload_file_name = FILE_LOCATION_STRUCTURE.format(service_id, upload_id)\n key = s3.Object(bucket_name, upload_file_name)\n contents = key.get()['Body'].read().decode('utf-8')\n except botocore.exceptions.ClientError as e:\n current_app.logger.error(\"Unable to download s3 file {}\".format(\n FILE_LOCATION_STRUCTURE.format(service_id, upload_id)))\n raise e\n return contents\n","sub_path":"app/main/uploader.py","file_name":"uploader.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"30383182","text":"import pygame\nfrom pygame.locals import *\nfrom pygame import time\nfrom Player import Player\nfrom Game import Game\nfrom Line import Line\n\ndef maze(): \n # Contains All Game Stats/Config\n game = Game() \n \n # Player Stats/Position/Details\n player = Player(80, 223)\n \n # Maze Details\n lines = Line.generateMaze(game, 15, 20)\n\n game.getClock().reset()\n keys = pygame.key.get_pressed()\n mills = 0\n while (game.isPlaying()): \n while (game.isActive()):\n game.updateScreen(player, lines) \n \n prevKeys = keys\n keys = pygame.key.get_pressed()\n\n if (not game.isPaused()): \n # Arrow Move Events\n if (keys[pygame.K_RIGHT] or keys[pygame.K_d]):\n blocked = 0\n for line in lines:\n if ((player.getX() + player.getSpeed() >= line.getXStart()) and (player.getX() <= line.getXStart()) and (player.getY() + 6 >= line.getYStart()) and (player.getY() - 6 <= line.getYStart())):\n blocked = 1\n if ((player.getX() + 6 + player.getSpeed() >= line.getXStart()) and (player.getX() <= line.getXStart()) and (player.getY() >= line.getYStart()) and (player.getY() <= line.getYEnd())):\n blocked = 1\n if (not blocked):\n player.moveX(1)\n game.updateScore(1)\n if (keys[pygame.K_DOWN] or keys[pygame.K_s]):\n blocked = 0\n for line in lines:\n if ((player.getY() + 5 + player.getSpeed() >= line.getYStart()) and (player.getY() <= line.getYStart()) and (player.getX() + 6 > line.getXStart()) and (player.getX() - 6 < line.getXStart())):\n blocked = 1\n if ((player.getY() + 6 + player.getSpeed() >= line.getYStart()) and (player.getY() <= line.getYStart()) and 
(player.getX() >= line.getXStart()) and (player.getX() <= line.getXEnd())):\n blocked = 1\n if (not blocked):\n player.moveY(1)\n if (keys[pygame.K_UP] or keys[pygame.K_w]):\n blocked = 0\n for line in lines:\n if ((player.getY() - player.getSpeed() <= line.getYEnd()) and (player.getY() >= line.getYEnd()) and (player.getX() + 6 > line.getXStart()) and (player.getX() - 6 < line.getXStart())):\n blocked = 1\n if ((player.getY() - 6 - player.getSpeed() <= line.getYEnd()) and (player.getY() >= line.getYEnd()) and (player.getX() >= line.getXStart()) and (player.getX() <= line.getXEnd())):\n blocked = 1\n if (not blocked):\n player.moveY(-1)\n if (keys[pygame.K_LEFT] or keys[pygame.K_a]):\n blocked = 0\n for line in lines:\n if ((player.getX() + 1 - player.getSpeed() <= line.getXEnd()) and (player.getX() >= line.getXEnd()) and (player.getY() + 6 >= line.getYStart()) and (player.getY() - 6 <= line.getYStart())):\n blocked = 1\n if ((player.getX() - player.getSpeed() <= line.getXEnd()) and (player.getX() >= line.getXEnd()) and (player.getY() >= line.getYStart()) and (player.getY() <= line.getYEnd())):\n blocked = 1\n if (not blocked):\n player.moveX(-1)\n game.updateScore(-1)\n \n # Process game pace adjustments\n player.setX(player.getX() - game.getPace())\n for line in lines:\n line.setXStart(line.getXStart() - game.getPace())\n line.setXEnd(line.getXEnd() - game.getPace())\n\n # Position Adjustments (to prevent screen overflow) \n if (player.getX() < game.getXMin()):\n game.end()\n if (player.getX() > game.getXMax()):\n player.setX(game.getXMax())\n for line in lines:\n line.setXStart(line.getXStart() - player.getSpeed())\n line.setXEnd(line.getXEnd() - player.getSpeed())\n player.setY(max(player.getY(), game.getYMin()))\n player.setY(min(player.getY(), game.getYMax()))\n\n # Reposition lines that have been passed \n xMax = Line.getXMax(lines)\n for line in lines:\n start = line.getXStart()\n end = line.getXEnd()\n if (start < 0):\n line.setXStart(xMax)\n if (start == end):\n line.setXEnd(xMax)\n else: \n line.setXEnd(xMax + 22)\n \n # Pause Event\n if (prevKeys[pygame.K_SPACE] and not keys[pygame.K_SPACE]):\n game.changePaused(player)\n \n # Quit Events\n if (keys[pygame.K_LMETA] and keys[pygame.K_q]):\n game.end()\n if (keys[pygame.K_RMETA] and keys[pygame.K_q]):\n game.end()\n if (keys[pygame.K_LALT] and keys[pygame.K_F4]):\n game.end()\n if (keys[pygame.K_ESCAPE]):\n game.end()\n \n # Process Game Events\n for event in pygame.event.get():\n if (event.type == pygame.QUIT):\n game.end()\n\n # Process FPS\n processTime = game.getClock().getFullTime() - mills\n if (processTime <= 16):\n time.delay(16 - processTime)\n mills = game.getClock().getFullTime()\n\n # Game has ended\n game.printEndDisplay() \n # Quit Events\n keys = pygame.key.get_pressed()\n if (keys[pygame.K_y]):\n game.reset();\n player.reset(80, (game.getHeight() / 2))\n\n # Maze Details\n lines = Line.generateMaze(game, 15, 20)\n \n game.getClock().reset()\n \n if (keys[pygame.K_n]):\n game.quit()\n if (keys[pygame.K_LMETA] and keys[pygame.K_q]):\n game.quit()\n if (keys[pygame.K_RMETA] and keys[pygame.K_q]):\n game.quit()\n if (keys[pygame.K_LALT] and keys[pygame.K_F4]):\n game.quit()\n \n # Process Game Events\n for event in pygame.event.get():\n if (event.type == pygame.QUIT):\n game.quit()\n \n game.cleanup()\n exit(0)\n","sub_path":"infinite_maze/infinite_maze.py","file_name":"infinite_maze.py","file_ext":"py","file_size_in_byte":6722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"292719763","text":"from typing import Tuple, Union\n\nfrom aiogram.utils.callback_data import CallbackData\n\nfrom utils.database import get_categories, get_count_for_category\nfrom utils.keyboard_maker import InlineKeyboardButton, InlineKeyboardMarkup\n\npage_change = CallbackData(\"page_change\", \"action\", \"current\")\n\n\ndef create_pages(page: int = 1, language: str = \"ru\"):\n data: Union[str, Tuple] = get_categories(language)\n markup = InlineKeyboardMarkup()\n if data:\n\n categories = data[(page - 1) * 5: page * 5]\n if not categories:\n return\n\n for num, (category,) in enumerate(categories):\n if not category:\n continue\n text = \"{} ({}; {})\".format(category, *get_count_for_category(category))\n markup.add(InlineKeyboardButton(text=text, callback_data=category))\n\n markup.add(\n InlineKeyboardButton(text=\"<<\", callback_data=page_change.new(action=\"-\", current=page)),\n InlineKeyboardButton(text=\">>\", callback_data=page_change.new(action=\"+\", current=page)),\n )\n return markup\n","sub_path":"utils/pagination.py","file_name":"pagination.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"504013948","text":"# -*- coding:utf-8 -*-\n# @Author : LZ\n# @Time : 2018/4/13 14:06\n#lock线程之间进行互斥的锁,用于修改共享数据\nimport threading\nimport time\nlock = threading.Lock()\nnum = 0\n\nclass MyUseLockTh(threading.Thread):\n def __init__(self,name):\n threading.Thread.__init__(self, name = name)\n def run(self):\n global lock,num\n print('ininii....',self.name,num)\n time.sleep(0.1)\n lock.acquire()\n num += 1\n time.sleep(1)\n print('after.....',self.name,num)\n lock.release()\nt1 = MyUseLockTh('thread___1')\nt2 = MyUseLockTh('thread___2')\nt3 = MyUseLockTh('thread___3')\nt1.start()\nt2.start()\nt3.start()\n","sub_path":"base_python/day19_thread/base_lock1.py","file_name":"base_lock1.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"168930385","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module contains utility functions for OpenID handling.\n\"\"\"\n\nfrom django.db import connections\nfrom django.conf import settings\nfrom psycopg2 import sql\n\n\ndef django_user_to_openid(user):\n \"\"\"\n Converts a Django user to an OpenID using information in the ESGF user database.\n \"\"\"\n with connections['userdb'].cursor() as cursor:\n\n cursor.execute(sql.SQL(\"SELECT openid FROM {}.{} WHERE username = %s\")\n .format(sql.Identifier(settings.ESGF_USERDB_USER_SCHEMA),\n sql.Identifier(settings.ESGF_USERDB_USER_TABLE)),\n [user.username])\n\n return cursor.fetchone()[0]\n","sub_path":"esgf_auth/openid.py","file_name":"openid.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"505490492","text":"def process_cities(filename):\n with open(filename, 'rt') as f:\n for line in f:\n line = line.strip()\n if 'quit' == line.lower():\n return\n\n country, city = line.split(\",\")\n city = city.strip()\n country = country.strip()\n print(city.title(), country.title(), sep=\",\")\n\nif __name__ == \"__main__\":\n import sys\n process_cities(sys.argv[1])\n\n","sub_path":"capitals.py","file_name":"capitals.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"6533519","text":"import datetime, pygame, os, random, 
math, sys, ctypes\nfrom noise import snoise2\n# from time import sleep\n# from worldMap import image_grayscale_to_dict\nfrom heightMapGenerator import create_hills, create_hill_edges\nfrom waterGenerator import create_rivers, create_beach\nfrom buildingGenerator import spawn_house\nfrom pathGenerator import apply_path_sprites, generate_dijkstra_path, create_stairs\nfrom plantGenerator import create_trees\n\n\nclass Map:\n\n TILE_SIZE = 16 # Length of 1 tile in pixels\n NB_SNE = 4 # The amount of different existing small nature elements\n EXCLUDED_SNE = [1, 3, 4] # Small nature elements to keep from te map\n\n def __init__(self, width, height, max_hill_height, tall_grass_coverage, tree_coverage, rain_rate, seed):\n self.seed = seed\n self.width = width\n self.height = height\n self.max_hill_height = max_hill_height\n self.tall_grass_coverage = tall_grass_coverage\n self.tree_coverage = tree_coverage\n self.rain_rate = rain_rate\n\n self.front_doors = []\n self.tile_heights = dict()\n self.ground_layer = dict()\n self.buildings = dict()\n self.rain = dict()\n self.decoration_layer = dict()\n\n random.seed(seed)\n\n @staticmethod\n def has_tile_at_position(layer, x, y):\n return (x, y) in layer.keys()\n\n def out_of_bounds(self, x, y):\n return x < 0 or y < 0 or x >= self.width or y >= self.height\n\n def create_rain(self):\n for y in range(0, map_size_y):\n for x in range(0, map_size_x):\n if random.randint(0, 100) < self.rain_rate:\n self.rain[(x, y)] = \"r_\" + str(random.randint(1, 2))\n elif (x, y) not in self.rain.keys():\n self.rain[(x, y)] = \"r_0\"\n for y in range(0, map_size_y):\n for x in range(0, map_size_x):\n if random.randint(0, 100) < self.rain_rate:\n if \"sne_\" not in self.ground_layer.get((x, y), \"\"):\n self.rain[(x, y)] = \"r_\" + str(random.randint(3, 5))\n\n def render(self, layer):\n\n def random_grass(offset_x, offset_y):\n\n def choose_sne_type(excluded_sne):\n sne_type = random.randint(0, self.NB_SNE)\n while sne_type in excluded_sne:\n sne_type = random.randint(0, self.NB_SNE)\n\n # Turn 80 percent of the flowers into tall grass\n if sne_type == 2 and random.random() < 0.8: sne_type = 0\n # Turn 0.5 percent of the tall grass into tall grass with a hidden item\n if sne_type == 0 and random.random() < 0.005: sne_type = \"0_p\"\n\n return \"sne_\" + str(sne_type)\n\n octaves = 1\n freq = 7\n sne_probability = snoise2((x + offset_x) / freq, (y + offset_y) / freq, octaves) + 0.5\n\n if sne_probability > (self.tall_grass_coverage / 100):\n grass_type = random.randint(0, 3)\n return \"g_\" + str(grass_type)\n else:\n return choose_sne_type(self.EXCLUDED_SNE)\n\n def try_blit_tile(tile):\n try:\n screen.blit(get_tile_file(tile), (x * self.TILE_SIZE, y * self.TILE_SIZE - correction))\n except Exception as e:\n screen.blit(get_tile_file(\"missing\"), (x * self.TILE_SIZE, y * self.TILE_SIZE - correction))\n print(e)\n\n def get_tile_file(tile):\n return pygame.image.load(os.path.join(\"resources\", tile + \".png\"))\n\n for y in range(0, map_size_y):\n for x in range(0, map_size_x):\n if (x, y) in layer.keys():\n current_tile = str(layer[(x, y)])\n if \"npc_\" in layer[(x, y)]:\n correction = 3 # npc's are slightly larger than a tile\n else:\n correction = 0\n try_blit_tile(current_tile)\n elif layer == self.ground_layer:\n screen.blit(get_tile_file(random_grass(x_offset, y_offset)), (x * self.TILE_SIZE, y * self.TILE_SIZE))\n\n pygame.display.update()\n\n\nmap_size_x = 120 # The horizontal amount of tiles the map consists of\nmap_size_y = 68 # The vertical amount of tiles 
the map consists of\nrandom_map = Map(map_size_x, map_size_y, 4, 50, 20, 20, random.randint(0, sys.maxsize))\n#random.randint(0, sys.maxsize)\nscreen_Size_X = Map.TILE_SIZE * map_size_x\nscreen_Size_Y = Map.TILE_SIZE * map_size_y\nx_offset = random.randint(0, 1000000)\ny_offset = random.randint(0, 1000000)\n\nscreen = pygame.display.set_mode((screen_Size_X, screen_Size_Y))\n\nprint(\"*creating landscape*\")\ncreate_hills(random_map)\ncreate_rivers(random_map)\ncreate_beach(random_map)\nprint(\"*builing houses*\")\nfor house_type in range(1, 10):\n for x in range(1):\n spawn_house(random_map, house_type, \"p_3\")\nprint(\"*dijkstra*\")\ngenerate_dijkstra_path(random_map, \"p_1\")\napply_path_sprites(random_map)\n\ncreate_hill_edges(random_map)\nprint(\"*growing trees*\")\ncreate_trees(random_map, 30, x_offset, y_offset)\nprint(\"*rendering*\")\nrandom_map.render(random_map.ground_layer)\nrandom_map.render(random_map.buildings)\nrandom_map.render(random_map.decoration_layer)\n# render(buildings, False)\n# create_rain(rain, 0)\n# render(rain, False)\nprint(\"Seed: \" + str(random_map.seed))\n\nsave = input(\"Save this image? (y/n/w): \")\n\nt = datetime.datetime.now().strftime(\"%G-%m-%d %H-%M-%S\")\nif save == \"y\" or save == \"w\":\n pygame.image.save(screen, os.path.join(\"saved images\", t + \".png\"))\n cwd = os.getcwd()\n if save == \"w\": ctypes.windll.user32.SystemParametersInfoW(20, 0, os.path.join(cwd, \"saved images\", t + \".png\"), 0)\n\nquit()\n","sub_path":"mapGenerator2.py","file_name":"mapGenerator2.py","file_ext":"py","file_size_in_byte":5720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"347400703","text":"import psycopg2\nfrom datetime import datetime, timedelta\nfrom proj.config import PARAM\nfrom django.conf import settings\nimport time\n\nclass PegaEstacao(object):\n def get(self):\n conn = psycopg2.connect(settings.DATABASE_PG2['klima'])\n sql = \"\"\"\n SELECT e.\"codigo_estac\",\n max( GREATEST(data, e.data_abertura)),\n count(*) as qtd\n FROM clima_estacao e\n LEFT OUTER JOIN clima_registro r\n ON r.\"estacao_FK_id\" = e.id\n group by e.\"codigo_estac\"\n order by max( GREATEST(data, e.data_abertura))\n LIMIT \"\"\"+str(PARAM['PegaEstacao_LIMIT_query'])\n\n cursor = conn.cursor()\n cursor.execute(sql)\n colecao = []\n for row in cursor.fetchall():\n\n estacao = row[0]\n dta = row[1]\n qtd = row[2]\n\n num_data = int(time.mktime(dta.timetuple()))\n num_data = int(num_data) * 1000\n\n # Se nao for o ano atual e existir registros\n # pega o proximo ano\n ano = dta.year\n if ano != datetime.now().year and qtd > 1:\n ano += 1\n str_data = '01/01/{0}'.format(ano)\n data = datetime.strptime(str_data, '%d/%m/%Y')\n num_data = int(time.mktime(data.timetuple()))\n num_data = int(num_data) * 1000\n\n\n linha = { 'estacao': row[0],\n 'data': num_data,\n 'ano': ano\n }\n colecao.append(linha)\n\n # result = json.dumps( colecao )\n\n return colecao\n\n\n","sub_path":"scripts/pega_estacao.py","file_name":"pega_estacao.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"230102670","text":"from django.urls import path, include\nfrom .views import LoginController, UserController, SingleUserController, DeveloperController\nfrom knox import views as knox_views\n\nurlpatterns = [\n path('api/auth', include('knox.urls')),\n path('api/auth/login', LoginController.as_view(),name='login'),\n path('api/auth/user', 
UserController.as_view(), name='users'),\n    path('api/auth/logout', knox_views.LogoutView.as_view(), name='knox_logout'),\n    path('api/developer', DeveloperController.as_view())\n]","sub_path":"main/accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"78270169","text":"#!/usr/bin/python\n\n'''\n\nAll rights reserved to bluedots in cloud inc 2011-12. \nAuthor: Anoop MS\n\n'''\n\n\ndef getXML(list):\n    p = ''\n    for val in list:\n        p = p + ''%(val)\n    p = p + ''\n    return p\n","sub_path":"CGI-Executables/webwutils.py","file_name":"webwutils.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"169280880","text":"import math\n\n\ndef binary_search(items, item):\n    \"\"\"Find item position in given list\"\"\"\n    low = 0\n    high = len(items) - 1\n\n    while low <= high:\n        mid = math.ceil((low + high) / 2)\n        guess = items[mid]\n\n        if guess == item:\n            return mid\n        if guess > item:\n            high = mid - 1\n        else:\n            low = mid + 1\n\n    return None\n\n\n# testing part\nsearch_list = [1, 3, 5, 7, 9]\n\nfor index, item in enumerate(search_list):\n    assert binary_search(search_list, item) == index\n\nassert binary_search(search_list, -1) is None\n","sub_path":"src/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"270649722","text":"#-*-encoding:utf-8 -*-\n# python\n\n# 1. 数字求和\n# num1 = input(\"请输入第一个数字: \")\n# num2 = input(\"请输入第二个数字: \")\n# sum = float(num1) + float(num2)\n\nimport random\n\n# 随机数小游戏\ndef randomGames():\n    i = 1\n    a = random.randint(0,100)\n    b = int(input(\"请输入0-100的随机数字: \"))\n    while a != b:\n        if a > b:\n            b = int(input(\"请输入一个比 %d, 小的数字: \"))\n        else:\n            b = int(input(\"请输入一个比 %d, 大的数字: \"))\n        i += 1\n    print(\"第%d 次输入的数字 %d 和电脑的随机数一样: %d\" %(i,b,a))\n\n# 判断输入的数字是正负数\ndef inputNumber():\n    while True:\n        try:\n            num = int(input(\"请输入一个数字: \"))\n            if(num == 0):\n                print(\"当前的数为: %d\" %(num))\n            elif(num > 0):\n                print(\"当前的数为整数\")\n            else:\n                print(\"当前的数为负值\")\n            break\n        except ValueError:\n            print(\"输入的数据,请重新输入整数!!! \")\n\n\n# 判断输入的是否为数字\ndef is_number(s):\n    try:\n        float(s)\n    except ValueError:\n        pass\n    try:\n        import unicodedata\n        unicodedata.numeric(s)\n        return True\n    except ValueError:\n        pass\n    return False\n\n\n# 判断是否为闰年: 能够被4整除,但不能被100整除,或者能被400整除\ndef rn(num):\n    try:\n        res = int(num)\n        if(res % 4 == 0 and res % 100 != 0) or (res % 400 == 0):\n            print(\"数字: %d 是闰年!!!!!!! \" %(res))\n    except ValueError:\n        print(\"请输入整数!!\")\n\n# 获取最大值函数\ndef maxValue():\n    nums = int(input(\"请输入数字的个数: \"))\n\n# 判断给定数内的所有的质数\ndef zs(num):\n    flag = False\n    import math\n    for i in(2,math.sqrt(num)):\n        if(num % i == 0):\n            flag = True\n            break\n    if(not flag):\n        print(\"num 是质数!!! 
\")\n else:\n print(\"num %d 不是质数���\" %(num))\n\n# 使用递归实现对应的操作\ndef digui(num):\n if (num > 0):\n return num * digui(num -1)\n else:\n return 1\n\n# print(digui(5))\n\n\n# 编写九九乘法表\ndef jiujiu():\n for i in range(1,10):\n for j in range(1,i + 1):\n print(\"%d * %d = %d\" %(j,i,j * i),end=\" \")\n print()\n\n# jiujiu()\n\n# 求解第n个斐波那契数列\ndef fbnq(num):\n a = 1\n b = 1\n result = 0\n if(num == 1 or num == 2):\n return 1\n else:\n for i in range(3,num + 1):\n result = a + b\n a = b\n b = result\n return result\n\n# 使用递归的方式,实现斐波那契数列\ndef fbdigui(num):\n if(num == 1):\n return 1\n if(num == 2):\n return 1\n if(num > 2):\n return fbdigui(num - 1) + fbdigui(num - 2)\n\nprint(fbnq(10))\nprint(fbdigui(10))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"com/lyn/test/pyPractice.py","file_name":"pyPractice.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"311952616","text":"\"\"\"\nTest the `print` function.\n\"\"\"\nfrom __future__ import unicode_literals, print_function\nimport pytest\nfrom prompt_toolkit import print_formatted_text as pt_print\nfrom prompt_toolkit.formatted_text import FormattedText\nfrom prompt_toolkit.styles import Style\nfrom prompt_toolkit.utils import is_windows\n\n\nclass _Capture:\n \" Emulate an stdout object. \"\n encoding = 'utf-8'\n\n def __init__(self):\n self._data = []\n\n def write(self, data):\n self._data.append(data)\n\n @property\n def data(self):\n return b''.join(self._data)\n\n def flush(self):\n pass\n\n def isatty(self):\n return True\n\n def fileno(self):\n # File descriptor is not used for printing formatted text.\n # (It is only needed for getting the terminal size.)\n return -1\n\n\n@pytest.mark.skipif(\n is_windows(), reason=\"Doesn't run on Windows yet.\")\ndef test_print_formatted_text():\n f = _Capture()\n pt_print([('', 'hello'), ('', 'world')], file=f)\n assert b'hello' in f.data\n assert b'world' in f.data\n\n\n@pytest.mark.skipif(\n is_windows(), reason=\"Doesn't run on Windows yet.\")\ndef test_with_style():\n f = _Capture()\n style = Style.from_dict({\n 'hello': '#ff0066',\n 'world': '#44ff44 italic',\n })\n tokens = FormattedText([\n ('class:hello', 'Hello '),\n ('class:world', 'world'),\n ])\n pt_print(tokens, style=style, file=f)\n assert b'\\x1b[0;38;5;197mHello' in f.data\n assert b'\\x1b[0;38;5;83;3mworld' in f.data\n","sub_path":"tests/test_print_formatted_text.py","file_name":"test_print_formatted_text.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"501407857","text":"from django.contrib import admin\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\n\n\nfrom .forms import UserAdminCreationForm, UserAdminChangeForm\nfrom .models import User, Shop, Product, ProductImage, Options, OptionField, ProductOptions, Tags, ProductTags,\\\n Categories, UserFavouriteShop, ShoppingCart, ProductOnCart, Address\n\n\nclass UserFavouriteShopInline(admin.TabularInline):\n model = UserFavouriteShop\n\nclass UserAdmin(BaseUserAdmin):\n # The forms to add and change user instances\n form = UserAdminChangeForm\n add_form = UserAdminCreationForm\n\n # 
The fields to be used in displaying the User model.\n    # These override the definitions on the base UserAdmin\n    # that reference specific fields on auth.User.\n    list_display = ('email', 'admin')\n    list_filter = ('admin',)\n    fieldsets = (\n        (None, {'fields': ('email', 'password', 'profile_image')}),\n        ('Personal info', {'fields': ('first_name', 'last_name', 'has_shop')}),\n        ('Permissions', {'fields': ('admin',)}),\n    )\n    # add_fieldsets is not a standard ModelAdmin attribute. UserAdmin\n    # overrides get_fieldsets to use this attribute when creating a user.\n    add_fieldsets = (\n        (None, {\n            'classes': ('wide',),\n            'fields': ('email', 'password1', 'password2')}\n        ),\n    )\n    search_fields = ('email',)\n    ordering = ('email',)\n    filter_horizontal = ()\n\n    inlines = [UserFavouriteShopInline,]\n\ndef index_elastic(modeladmin, request, queryset):\n    for product in queryset:\n        product.creation_finished = True\n        product.save()\n        product.indexing()\nindex_elastic.short_description = \"Index to elastic\"\n\nclass ProductImageInline(admin.TabularInline):\n    model = ProductImage\n    extra = 3\n\n\nclass ProductTagsInline(admin.TabularInline):\n    model = ProductTags\n\n\nclass ProductAdmin(admin.ModelAdmin):\n    inlines = [ProductImageInline, ProductTagsInline, ]\n    actions = [index_elastic, ]\n\n\nclass ShoppingCartInline(admin.TabularInline):\n    model = ProductOnCart\n\n\nclass ShoppingCartAdmin(admin.ModelAdmin):\n    inlines = [ShoppingCartInline, ]\n\n\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Shop)\nadmin.site.register(Product, ProductAdmin)\nadmin.site.register(ProductOptions)\nadmin.site.register(Options)\nadmin.site.register(OptionField)\nadmin.site.register(Tags)\nadmin.site.register(Categories)\nadmin.site.register(ProductImage)\nadmin.site.register(ShoppingCart, ShoppingCartAdmin)\nadmin.site.register(Address)\n\n# Remove Group Model from admin. 
We're not using it.\nadmin.site.unregister(Group)\n","sub_path":"web/etsy/etsy_core/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"303156336","text":"# read in the SFSes per chromosome, combine them, and calculate pi.\n\nimport glob, argparse, math, sys, re\n\nparser = argparse.ArgumentParser(description=\"\"\"give this function a regex which corresponds to a set of\n SFS files, and it will read them in and calculate and print\n pi and Tajima's D\"\"\")\n\nparser.add_argument('pattern', help='a regex that corresponds to a set of SFS files that you want to calculate summary statistics on')\nargs = parser.parse_args()\npattern = args.pattern\n\n\ndef sum_SFSs(*args):\n if not args:\n print(\"sum_SFSs: no SFS(s) defined\")\n return\n\n sfs_list = [i for i in args]\n if not all(len(i) == len(sfs_list[0]) for i in sfs_list):\n print(\"sum_SFSs: not all SFSes are the same length\")\n return\n\n summed_sfs = [sum(i) for i in zip(*sfs_list)]\n return summed_sfs;\n\n# WRONG COS DOESN'T NECESSARILY CALCULATE N PROPERLY FROM FOLDED SFS\n# def pi_from_fSFS(SFS, per_site = True):\n# \tif sum(SFS) == 0:\n# \t\treturn -99\n# \tN = (len(SFS) - 1) * 2\n# \tl = len(SFS)\n# \tbinom = (N * (N-1))/2\n# \tpi = sum([(1.0*i*(N-i)*(SFS[i]))/(binom) for i in range(l) if i != 0])\n# \tif per_site == True:\n# \t\treturn pi/sum(SFS)\n# \telse:\n# \t\treturn pi\n\ndef pi_from_uSFS(SFS,per_site = True):\n\tif sum(SFS) ==0:\n\t\treturn -99\n\tN = len(SFS) - 1\n\tbinom = (N * (N -1))/2\n\tpi = sum([(1.0*i*(N-i)*(SFS[i]))/(binom) for i in range(N) if i != 0])\n\tif per_site == True:\n\t\treturn pi/sum(SFS)\n\telse:\n\t\treturn pi\n\n# WRONG COS DOESN'T NECESSARILY CALCULATE N PROPERLY FROM FOLDED SFS\n# def tajimaD_from_fSFS(SFS):\n# #if len(SFS) <2: return None\n# th_pi = pi_from_fSFS(SFS, per_site=False)\n# N = (len(SFS) - 1) * 2\n# S = sum(SFS[1:])\n# length = sum(SFS)\n# a1 = sum([1.0/i for i in range(1, N)])\n# a2 = sum([1.0/(i**2) for i in range(1, N)])\n# b1 = float(N+1)/(3*(N-1))\n# b2 = float(2 * ( (N**2) + N + 3 )) / (9*N*(N-1))## Tajima 1989 Equation 9\n# c1 = b1 - 1.0/a1\n# c2 = b2 - float(N+2)/(a1 * N) + float(a2)/(a1 ** 2)\n# e1 = float(c1) / a1\n# e2 = float(c2) / ( (a1**2) + a2 )\n# if ((e1 * S )+ ((e2 * S) * (S - 1))) == 0.0: # Tajima 1989 Equation 38\n# return -99\n# else:\n# D = (float(th_pi - (float(S)/(a1)))/math.sqrt((e1 * S )+ ((e2 * S) * (S - 1) )))\n# return D\n\ndef tajimaD_from_uSFS(SFS):\n\t#if len(SFS) <2: return None\n\tth_pi = pi_from_uSFS(SFS,per_site=False)\n\tN = len(SFS)-1\n\tS = sum(SFS[1:-1])\n\tlength = sum(SFS)\n\ta1 = sum([1.0/i for i in range(1, N)])\n\ta2 = sum([1.0/(i**2) for i in range(1, N)])\n\tb1 = float(N+1)/(3*(N-1))\n\tb2 = float(2 * ( (N**2) + N + 3 )) / (9*N*(N-1))## Tajima 1989 Equation 9\n\tc1 = b1 - 1.0/a1\n\tc2 = b2 - float(N+2)/(a1 * N) + float(a2)/(a1 ** 2)\n\te1 = float(c1) / a1\n\te2 = float(c2) / ( (a1**2) + a2 )\n\tif ((e1 * S )+ ((e2 * S) * (S - 1))) == 0.0: # Tajima 1989 Equation 38\n\t\treturn -99\n\telse:\n\t\tD = (float(th_pi - (float(S)/(a1)))/math.sqrt((e1 * S )+ ((e2 * S) * (S - 1) )))\n\t\treturn D\n\n# WRONG COS DOESN'T NECESSARILY CALCULATE N PROPERLY FROM FOLDED SFS\n# def theta_W_from_fSFS(SFS,per_site=True):\n# \tif len(SFS) ==0: return -99\n# \tN = (len(SFS)-1) * 2\n# \tS = sum(SFS[1:]) ## this slice takes the interior of the SFS from a folded SFS, gets S\n# \tharmonic = sum(1.0/d for d in range(1, N))\n# \tif 
per_site == True:\n# \t\treturn float(S)/(harmonic*sum(SFS))\n# \telse:\n# \t\treturn float(S)/(harmonic)\n\ndef theta_W_from_uSFS(SFS,per_site=True):\n\tif len(SFS) ==0: return -99\n\tN = len(SFS)-1\n\tS = sum(SFS[1:-1]) ## this slice takes the interior of the SFS, gets S\n\tharmonic = sum(1.0/d for d in range(1, N))\n\tif per_site == True:\n\t\treturn float(S)/(harmonic*sum(SFS))\n\telse:\n\t\treturn float(S)/(harmonic)\n\n#SFS1 = [295478, 3778, 1827, 1112, 761, 635, 484, 439, 395, 423, 213]\n#SFS2 = [411028, 5007, 2305, 1362, 1043, 736, 641, 580, 517, 500, 271]\n\n#totalSFS = sum_SFSs(SFS1, SFS2)\n#print(totalSFS)\n\nfileList = glob.glob(str('*' + pattern + '*'))\n#print(fileList)\n\nYregex = re.compile('chrY')\nYlist = list(filter(Yregex.search, fileList))\n#print('Ylist is ' + str(Ylist))\n\nXregex = re.compile('chrX')\n#print(Xregex)\nXlist = list(filter(Xregex.search, fileList))\n#print('Xlist is ' + str(Xlist))\n\nautosomalList = fileList\nif Ylist:\n autosomalList.remove(Ylist[0])\nif Xlist:\n autosomalList.remove(Xlist[0])\n#print(autosomalList)\n\n\nSFSs_x = []\nfor FILE in Xlist:\n f = open(FILE, 'r')\n SFSs_x.append(list(map(int, f.readline().strip('\\n').split(' '))))\n\nSFSs_a = []\nfor FILE in autosomalList:\n f = open(FILE, 'r')\n SFSs_a.append(list(map(int, f.readline().strip('\\n').split(' '))))\n\ntotal_autosomal_SFS = sum_SFSs(*SFSs_a)\n# print(','.join([str(x) for x in total_autosomal_SFS]))\nprint('autosomal SFS is ' + str(total_autosomal_SFS))\nprint('total autosomal sites: ' + str(sum(total_autosomal_SFS)))\nprint('autosomal pi per site is ' + str(pi_from_uSFS(total_autosomal_SFS)))\nprint('autosomal Tajima\\'s D is ' + str(tajimaD_from_uSFS(total_autosomal_SFS)))\nprint('autosomal Watterson\\'s theta is ' + str(theta_W_from_uSFS(total_autosomal_SFS)))\n\nif Xlist:\n total_Xlinked_SFS = sum_SFSs(*SFSs_x)\n print('Xlinked SFS is ' + str(total_Xlinked_SFS))\n print('Xlinked pi per site is ' + str(pi_from_uSFS(total_Xlinked_SFS)))\n print('Xlinked Tajima\\'s D is ' + str(tajimaD_from_uSFS(total_Xlinked_SFS)))\n print('Xlinked Watterson\\'s theta is ' + str(theta_W_from_uSFS(total_Xlinked_SFS)))\n\n\n#SFSdict = list(zip(fileList, SFSs))\n#print(SFSdict)\n","sub_path":"combine_SFSes_get_sumstats.py","file_name":"combine_SFSes_get_sumstats.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"74149301","text":"import requests\r\nfrom datetime import datetime, timedelta\r\nimport json\r\nimport csv\r\nfrom time import sleep\r\nfrom collections import defaultdict\r\nimport time\r\n\r\n\r\n#authorization\r\napi_key = \"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJqdGkiOiI3YTQ2OTJkMC1mMjc2LTAxMzctYzk2ZC0wYjdlY2UzYmI5NjYiLCJpc3MiOiJnYW1lbG9ja2VyIiwiaWF0IjoxNTc0NzcxNDM5LCJwdWIiOiJibHVlaG9sZSIsInRpdGxlIjoicHViZyIsImFwcCI6InJvbmR1bGtpbi1nbWFpIn0.ZjoJPr_DmRgOy2eK2pA1GQy5vekA39N8XU8pfInbhAM\"\r\nmatches_endpoint = \"https://api.pubg.com/shards/steam/matches/\"\r\nheader = {\r\n \"Authorization\": \"Bearer \" + api_key,\r\n \"Accept\": \"application/vnd.api+json\"\r\n}\r\n\r\n#number of samples\r\nsamples_num = 14\r\n\r\n#the headers of the csv\r\nheaders = [\"matchId\",'playerMatchId', 'DBNOs', 'assists', 'boosts', 'damageDealt', 'deathType', 'headshotKills', 'heals',\r\n 'killPlace', 'killStreaks', 'kills',\r\n 'longestKill', 'name', 'playerId', 'revives', 'rideDistance', 'roadKills', 'swimDistance', 'teamKills',\r\n 'timeSurvived', 'vehicleDestroys',\r\n 'walkDistance', 'weaponsAcquired', 
'winPlace', 'createdAt', 'stats', 'isCustomMatch', 'seasonState',\r\n 'duration', 'gameMode', 'titleId','matchType',\r\n 'shardId', 'tags', 'mapName', \"rosterId\", \"won\", \"rank\", \"teamId\"]\r\n\r\n#check if the match already exists\r\ncovered_matches = set()\r\n\r\n#write the csv\r\nwith open('Clean.csv', mode='w') as data_file:\r\n writer = csv.DictWriter(data_file, fieldnames=headers)\r\n writer.writeheader()\r\n\r\n #start loop for samples from 0 to 13\r\n for sample_num in range(samples_num):\r\n try:\r\n\r\n #set the date with help of the loop\r\n date = (datetime.utcnow() - timedelta(days=sample_num+1, seconds=1)).strftime(\"%Y-%m-%dT%H:%M:%SZ\")\r\n\r\n #endpoint for samples\r\n samples_endpoint = f\"https://api.pubg.com/shards/steam/samples?filter[createdAt-start]={date}\"\r\n\r\n #request server for samples\r\n r = requests.get(samples_endpoint, headers=header)\r\n\r\n #the response in r.text\r\n response = json.loads(r.text)\r\n\r\n #navigate through data\r\n matches = response[\"data\"][\"relationships\"][\"matches\"][\"data\"]\r\n\r\n #for each object in data look at matches ID\r\n for index, match in enumerate(matches):\r\n \r\n #if the match is already in covered matches, skip this\r\n if match[\"id\"] in covered_matches:\r\n continue\r\n\r\n #if not, add match ID to set \"covered_matches\"\r\n covered_matches.add(match[\"id\"])\r\n try:\r\n\r\n #request server for matches with ID\r\n r = requests.get(matches_endpoint + match[\"id\"], headers=header)\r\n\r\n #response in r.text\r\n match_data = json.loads(r.text)\r\n\r\n #navigation to attributes\r\n attributes = match_data[\"data\"][\"attributes\"]\r\n\r\n #navigation to included\r\n players = match_data[\"included\"]\r\n \r\n #write the csv with its headers\r\n players_data = defaultdict(dict)\r\n for player in players:\r\n if player[\"type\"] == \"participant\":\r\n players_data[player[\"id\"]] = {\r\n **{\"matchId\": match[\"id\"], \"playerMatchId\": player[\"id\"]},\r\n **player[\"attributes\"][\"stats\"],\r\n **attributes,\r\n **{\r\n \"rosterId\": None, \"won\": None, \"rank\": None, \"teamId\":None\r\n }\r\n }\r\n elif player[\"type\"] == \"roster\":\r\n for participant in player[\"relationships\"][\"participants\"][\"data\"]:\r\n players_data[participant[\"id\"]] = {\r\n **players_data[participant[\"id\"]],\r\n **{\"rosterId\": player[\"id\"], \"won\": player[\"attributes\"][\"won\"], **player[\"attributes\"][\"stats\"]}\r\n }\r\n\r\n # if (index + 1) % 9 == 0:\r\n # end_time = time.time()\r\n # elapsed = int(end_time - start_time)\r\n # print(f\"Going to sleep for {61 - elapsed} seconds\")\r\n # sleep(61 - elapsed)\r\n\r\n except Exception as e:\r\n print(\"error sleeping \\n\"+str(e))\r\n sleep(60)\r\n\r\n for playerId, player in players_data.items():\r\n writer.writerow(player)\r\n\r\n print(f\"Finished match number {index} out of {len(matches)} matches of sample number {sample_num}\")\r\n if (sample_num + 1) % 10 == 0:\r\n print(\"About to exceed sample quota going to sleep\")\r\n sleep(60)\r\n\r\n\r\n except Exception as e:\r\n print(\"error: \" + str(e))\r\n sleep(60)\r\n\r\n","sub_path":"1) collect_data.py","file_name":"1) collect_data.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"625120164","text":"import env\nimport os\nimport sys\n\nfrom common import descr_argument_parser\nfrom config_descr import NAME\nfrom common.config import get_global_property, get_global_property_description\nfrom 
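# The rate limiting in collect_data.py above is commented out, so long runs
# rely on catching exceptions and sleeping. A sketch of a fixed-window
# throttle that could replace it; the 10-requests-per-minute budget is an
# assumption -- check the quota actually attached to the API key.
import time

class WindowThrottle:
    def __init__(self, max_calls=10, window=60.0):
        self.max_calls = max_calls
        self.window = window
        self.calls = []

    def wait(self):
        now = time.time()
        # keep only timestamps still inside the window
        self.calls = [t for t in self.calls if now - t < self.window]
        if len(self.calls) >= self.max_calls:
            time.sleep(self.window - (now - self.calls[0]))
        self.calls.append(time.time())

# throttle = WindowThrottle()
# throttle.wait()  # call once before each requests.get(...)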
common.shared_logger import g_logger\nfrom common.utils import unix_path\nimport internal.svn.command as svn\nfrom common.description import Description\n\n\ndef get_context_description():\n return Description({\n NAME.data_dir: {\n 'default': get_global_property(NAME.data_dir),\n 'help': get_global_property_description(NAME.data_dir)['help'],\n },\n NAME.data_source_dir: {\n 'default': get_global_property(NAME.data_source_dir),\n 'help': get_global_property_description(NAME.data_source_dir)['help'],\n },\n \"fix\": {\n 'default': False,\n 'help': 'fix spaces by replacing them with _ (by default only shows list of problems)',\n }\n })\n\n\ndef get_input_context(*argv, **kwargs):\n parser = descr_argument_parser.DescrArgumentParser(description='Fix spaces in psd')\n parser.add_descr(get_context_description())\n return parser.get_context(*argv, **kwargs)\n\n\ndef find_references_in_file(config_file_path, sprites_to_fix, fix):\n g_logger.debug(\"find_references_in_file %s\" % config_file_path)\n config_file_name = os.path.basename(config_file_path)\n found = False\n with open(config_file_path, 'r') as f:\n config_file_lines = f.readlines()\n line_num = 0\n for config_file_line in config_file_lines:\n line_num += 1\n for sprite_path, fixed_sprite_path in sprites_to_fix.iteritems():\n if sprite_path in config_file_line:\n found = True\n if fix:\n config_file_line = config_file_line.replace(sprite_path, fixed_sprite_path)\n g_logger.info(\"fixed \\\"%s\\\" on line %d in %s\" % (sprite_path, line_num, config_file_name) )\n else:\n g_logger.info(\"found \\\"%s\\\" on line %d in %s\" % (sprite_path, line_num, config_file_name) )\n if fix:\n config_file_lines[line_num-1] = config_file_line\n if found and fix:\n with open(config_file_path, 'w') as f:\n f.writelines(config_file_lines)\n\n\ndef find_all_references(configs_particles_dirs, sprites_to_fix, fix):\n for configs_particles_dir in configs_particles_dirs:\n for root, dirs, files in os.walk(configs_particles_dir):\n for file_name in files:\n if file_name.endswith(\".yaml\"):\n absolute_file_path = os.path.join(root, file_name)\n find_references_in_file(absolute_file_path, sprites_to_fix, fix)\n\n\ndef fix_filename(folder_path, file_name):\n fixed_file_name = file_name.replace(\" \", \"_\")\n file_path = os.path.join(folder_path, file_name)\n fixed_file_path = os.path.join(folder_path, fixed_file_name)\n svn.move(folder_path, file_path, fixed_file_path)\n\n\ndef fix_spaces_in_psd(data_source_dir, configs_particles_dirs, gfx_particles_dir, fix=False):\n sprites_to_fix = dict()\n for root, dirs, files in os.walk(gfx_particles_dir):\n relative_root = os.path.relpath(root, data_source_dir)\n for file_name in files:\n g_logger.info(\"process \\\"%s\\\"\" % file_name)\n if file_name.endswith(\".psd\") and ' ' in file_name:\n relative_file_path = os.path.join(relative_root, file_name)\n sprite_path = unix_path(relative_file_path[:-4]) + \"\\\"\"\n fixed_sprite_path = sprite_path.replace(\" \", \"_\")\n sprites_to_fix[sprite_path] = fixed_sprite_path\n if fix:\n g_logger.info(\"fix \\\"%s\\\"\" % file_name)\n fix_filename(root, file_name)\n find_all_references(configs_particles_dirs, sprites_to_fix, fix)\n\n\ndef do(context):\n g_logger.info(context)\n gfx_particles_dir = os.path.join(context[NAME.data_source_dir], \"Gfx\", \"Particles\")\n configs_particles_dirs = [os.path.join(context[NAME.data_dir], \"Configs\", \"Particles\"), os.path.join(context[NAME.data_dir], \"Configs\", \"ParticlesHigh\"), os.path.join(context[NAME.data_dir], \"Configs\", 
\"ParticlesLow\")]\n fix_spaces_in_psd(context[NAME.data_source_dir], configs_particles_dirs, gfx_particles_dir, context[\"fix\"])\n\nif __name__ == '__main__':\n sys.exit(do(get_input_context()))\n","sub_path":"Scripts/helpers/fix_spaces_in_psd.py","file_name":"fix_spaces_in_psd.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"505825695","text":"# -*- coding: utf-8 -*-\n\n\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib.cbook as cbook\n\nimport numpy as np\nimport pandas as pd\n\n\n\nfname = cbook.get_sample_data('msft.csv', asfileobj=False)\nwith cbook.get_sample_data('msft.csv') as file:\n msft = pd.read_csv(file)\n\n\npd.plotting.register_matplotlib_converters()\n\nwith cbook.get_sample_data('msft.csv') as file:\n msft = pd.read_csv(file, parse_dates=['Date'])\n\n\n\n# Deprecated:\nplt.plotfile(fname, (0, 5, 6))\n\n# Use instead:\nmsft.plot(0, [5, 6], subplots=True)\n\n\n\n# Deprecated:\nplt.plotfile(fname, ('date', 'volume', 'adj_close'))\n\n# Use instead:\nmsft.plot(\"Date\", [\"Volume\", \"Adj. Close*\"], subplots=True)\n\n\n\n# Deprecated:\nplt.plotfile(fname, ('date', 'volume', 'adj_close'),\n plotfuncs={'volume': 'semilogy'})\n\n# Use instead:\nfig, axs = plt.subplots(2, sharex=True)\nmsft.plot(\"Date\", \"Volume\", ax=axs[0], logy=True)\nmsft.plot(\"Date\", \"Adj. Close*\", ax=axs[1])\n\n\n\n# Deprecated:\nplt.plotfile(fname, (0, 5, 6), plotfuncs={5: 'semilogy'})\n\n# Use instead:\nfig, axs = plt.subplots(2, sharex=True)\nmsft.plot(0, 5, ax=axs[0], logy=True)\nmsft.plot(0, 6, ax=axs[1])\n\n\n\n# Deprecated:\nplt.plotfile(fname, ('date', 'open', 'high', 'low', 'close'), subplots=False)\n\n# Use instead:\nmsft.plot(\"Date\", [\"Open\", \"High\", \"Low\", \"Close\"])\n\n\n\n# Deprecated:\nplt.plotfile(fname, (0, 5, 6), plotfuncs={5: \"bar\"})\n\n# Use instead:\nfig, axs = plt.subplots(2, sharex=True)\naxs[0].bar(msft.iloc[:, 0], msft.iloc[:, 5])\naxs[1].plot(msft.iloc[:, 0], msft.iloc[:, 6])\nfig.autofmt_xdate()\n\n\n\nfname2 = cbook.get_sample_data('data_x_x2_x3.csv', asfileobj=False)\nwith cbook.get_sample_data('data_x_x2_x3.csv') as file:\n array = np.loadtxt(file)\n\n\n\n# Deprecated:\nplt.plotfile(fname2, cols=(0, 1, 2), delimiter=' ',\n names=['$x$', '$f(x)=x^2$', '$f(x)=x^3$'])\n\n# Use instead:\nfig, axs = plt.subplots(2, sharex=True)\naxs[0].plot(array[:, 0], array[:, 1])\naxs[0].set(ylabel='$f(x)=x^2$')\naxs[1].plot(array[:, 0], array[:, 2])\naxs[1].set(xlabel='$x$', ylabel='$f(x)=x^3$')\n\n\n\n# For simplicity of the example we reuse the same file.\n# In general they will be different.\nfname3 = fname2\n\n# Depreacted:\nplt.plotfile(fname2, cols=(0, 1), delimiter=' ')\nplt.plotfile(fname3, cols=(0, 2), delimiter=' ',\n newfig=False) # use current figure\nplt.xlabel(r'$x$')\nplt.ylabel(r'$f(x) = x^2, x^3$')\n\n# Use instead:\nfig, ax = plt.subplots()\nax.plot(array[:, 0], array[:, 1])\nax.plot(array[:, 0], array[:, 2])\nax.set(xlabel='$x$', ylabel='$f(x)=x^3$')\n\nplt.show()","sub_path":"Day58.py","file_name":"Day58.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"433870168","text":"from collections import Counter\nimport re\nimport sys\nimport os\n\n\ndef load_data(filepath):\n with open(filepath, 'rt', encoding='utf8') as file_obj:\n text = file_obj.read()\n return text\n\n\ndef get_most_frequent_words(text):\n words = re.findall(r'\\w+', text.lower())\n 
counter_obj = Counter(words)\n amount_frequent_words = 10\n most_frequent_words = counter_obj.most_common(amount_frequent_words)\n return most_frequent_words\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 1 or os.path.exists(sys.argv[1]) is False:\n sys.exit('Не указан входной файл или он не существует')\n frequent_words = get_most_frequent_words(load_data(sys.argv[1]))\n print('Самые частые слова в тексте и их частота')\n for word, count in frequent_words:\n print (word, ' : ', count)\n","sub_path":"lang_frequency.py","file_name":"lang_frequency.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"461871814","text":"def almostIncreasingSequence(sequence, count):\n size = len(sequence) - 1 # Size is the length of the array\n if size < 2:\n return True\n center = size // 2 # i is the center index of the array\n if sequence[center] < sequence[center - 1]:\n count += 1\n if count >= 2:\n return False\n arTwo = sequence[center + 1:]\n arOne = sequence[:center]\n return almostIncreasingSequence(arOne, count), almostIncreasingSequence(arTwo, count)\n\narr = [1, 2, 3, 5, 7, 4, 5, 6, 7, 8, 9]\nprint(almostIncreasingSequence(arr, 0))\n","sub_path":"CodeSignal/almostIncreasingSequence.py","file_name":"almostIncreasingSequence.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"427379842","text":"import re\r\n\r\n__max_random_state = 10 ** 5\r\n\r\n\r\ndef w_shingles(text, shingle_size=10, split_regex=r\"([\\w]+'?[\\w]+)\",\r\n case_sensitive=False, stop_words=[],\r\n stemmer=lambda x: x):\r\n \"\"\"\r\n Make w-shingle (a tuple of 'shingles' - n-grams).\r\n http://en.wikipedia.org/wiki/W-shingling\r\n\r\n Parameters\r\n :param text: the document do process\r\n :param shingle_size: length of one shingle\r\n :param split_regex: regex to tokenize text\r\n :default r\"([\\w]+'?[\\w]+)\" - extract words\r\n :param case_sensitive: flag whether of not lowercase all letters\r\n :default False\r\n :param stop_words: a list of stop words\r\n (stop words are the words that will be removed)\r\n :param stemmer: a function to stem each token\r\n usually it a function for reducing inflected (or sometimes derived)\r\n words to their word stem.\r\n :return: a list of shingles (also list)\r\n\r\n Examples\r\n >>> w_shingles(\"a rose is a rose is a rose\", 4, stop_words=['a'])\r\n [['rose', 'is', 'rose', 'is'], ['is', 'rose', 'is', 'rose']\r\n >>> w_shingles('Hello, world!!!', 1, case_sensitive=True)\r\n [['Hello'], ['world']]\r\n \"\"\"\r\n\r\n if shingle_size == 0:\r\n return []\r\n elif shingle_size < 0:\r\n raise ValueError(\"size must be positive\")\r\n\r\n if type(split_regex) == str:\r\n tokenizer = re.compile(split_regex, re.UNICODE)\r\n else:\r\n tokenizer = split_regex\r\n\r\n split = [stemmer(item) for item in tokenizer.findall(text)]\r\n\r\n if not case_sensitive and split:\r\n split = [item.lower() for item in split]\r\n\r\n if stop_words:\r\n split = [item for item in split if item not in stop_words]\r\n\r\n return [split[i:(i + shingle_size)]\r\n for i in range(0, len(split) - shingle_size + 1)]\r\n\r\n\r\ndef get_hashes(shingles, hash_func, state=0):\r\n return [hash_func(bytearray(' '.join(shingle), encoding='utf-8'), state) for shingle in shingles]\r\n\r\n\r\ndef get_max_hash(shingles, hash_func, state):\r\n return max(get_hashes(shingles, hash_func, state))\r\n\r\n\r\ndef get_min_hash(shingles, 
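# Note on almostIncreasingSequence above: its final statement returns a tuple
# of the two recursive results rather than a boolean, and splitting at the
# centre never compares elements across the split, so the function does not
# actually decide the problem. A corrected single-pass version for comparison
# (the standard "at most one removal" check):
def almost_increasing(seq):
    removed = 0
    for i in range(1, len(seq)):
        if seq[i] <= seq[i - 1]:
            removed += 1
            if removed > 1:
                return False
            # if neither dropping seq[i] nor seq[i-1] repairs the order, fail
            if (i > 1 and seq[i] <= seq[i - 2]
                    and i + 1 < len(seq) and seq[i + 1] <= seq[i - 1]):
                return False
    return True

# almost_increasing([1, 2, 3, 5, 7, 4, 5, 6, 7, 8, 9]) -> False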
hash_func, state):\r\n return min(get_hashes(shingles, hash_func, state))\r\n\r\n\r\ndef get_extreme_hash(shingles, hash_func, state):\r\n hashes = get_hashes(shingles, hash_func, state)\r\n return min(hashes), max(hashes)\r\n\r\n\r\ndef make_minhash_array(shingles, hash_func, states):\r\n \"\"\"\r\n MinHash (or the min-wise independent permutations locality\r\n sensitive hashing scheme) is a technique for quickly\r\n estimating how similar two sets are.\r\n :param shingles: shingles to find MinHashes\r\n :param hash_func: hash function\r\n :param states: state to random generator\r\n :return: array of MinHashes for each hash\r\n \"\"\"\r\n return [get_min_hash(shingles, hash_func, state) for state in states]\r\n\r\n\r\ndef count_equal_hash(hash1, hash2):\r\n return len([0 for h1, h2 in zip(hash1, hash2) if h1 == h2])\r\n\r\n\r\ndef compare_texts(text1, text2, power=128, hash_func=None, **kwargs):\r\n \"\"\"\r\n Compare texts by create shingles, calculate hash and compare minhashes.\r\n :param text1: string, text to compare\r\n :param text2: string, text to compare\r\n :param power: number of hashes to generate\r\n :param hash_func: hash function to compute hash values,\r\n if not defined: zlib.adler32\r\n :param kwargs: args for get_shingles function\r\n :return: number of matches\r\n \"\"\"\r\n if not hash_func:\r\n from zlib import adler32\r\n hash_func = adler32\r\n\r\n from random import randint\r\n states = [randint(1, __max_random_state) for _ in range(power)]\r\n\r\n sh1 = w_shingles(text1, **kwargs)\r\n sh2 = w_shingles(text2, **kwargs)\r\n hashes = [make_minhash_array(sh1, hash_func, states),\r\n make_minhash_array(sh2, hash_func, states)]\r\n\r\n return count_equal_hash(hashes[0], hashes[1])\r\n\r\n\r\ndef sim_hash(text, states=128, seed=None, hash_func=None, **kwargs):\r\n \"\"\"\r\n Compute SimHash value for the text.\r\n :param text: text to operate\r\n :param states: number of states or list of them\r\n :param seed: if defined setup random seed\r\n :param hash_func: hash function to use\r\n :param kwargs: arguments that will be passed to w-shingle function\r\n :return: binary string representation of SimHash\r\n \"\"\"\r\n if not hash_func:\r\n from zlib import adler32\r\n hash_func = adler32\r\n\r\n if type(states) is int:\r\n from random import randint\r\n if seed:\r\n import random\r\n random.seed(seed)\r\n\r\n states = [randint(1, __max_random_state)\r\n for _ in range(states)]\r\n\r\n shingles = w_shingles(text, **kwargs)\r\n if not shingles:\r\n return None\r\n\r\n hashes = [bin(h) for h\r\n in make_minhash_array(shingles, hash_func, states)]\r\n max_length = max([len(h) for h in hashes])\r\n\r\n result = [0 for x in range(max_length)]\r\n for h in hashes:\r\n for i, s in enumerate(h):\r\n result[i] += 1 if s == '1' else -1\r\n\r\n return ''.join(['0' if i < 0 else '1' for i in result])\r\n\r\n\r\ndef make_shifts(string_hash, shift_length):\r\n pass\r\n","sub_path":"ShingleProject/shingles.py","file_name":"shingles.py","file_ext":"py","file_size_in_byte":5195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"530330637","text":"import os\n\n\nSERVICE_PORT = os.environ.get(\"SERVICE_PORT\", 5000)\nWORKERS = os.environ.get(\"GUNICORN_WORKERS\", 3)\nTHREADS = os.environ.get(\"GUNICORN_THREADS\", 3)\n\nbind = f\"0.0.0.0:{SERVICE_PORT}\"\n\nchdir = \"/code/\"\n\nworkers = WORKERS\nworker_class = \"gthread\"\nthreads = THREADS\n\n# This flag makes gunicorn import package only once\n# so that it could fail fast if 
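# Usage sketch for the shingles module above: the fraction of matching
# minhashes estimates the Jaccard similarity of the two shingle sets. The
# texts are made up for illustration, and because the hash states are drawn
# at random on each call, the estimate varies slightly between runs.
if __name__ == '__main__':
    a = "a rose is a rose is a rose"
    b = "a rose is a rose"
    power = 128
    matches = compare_texts(a, b, power=power, shingle_size=2)
    print("estimated Jaccard similarity: %.2f" % (matches / power))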
something is wrong with imported package.\npreload_app = True\n","sub_path":"docker/gunicorn_cfg.py","file_name":"gunicorn_cfg.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"556596688","text":"from setuptools import setup, find_packages\n\nrequires = [ \"py-etcd>=0.1.0\", \"gipc>=0.4.0\", \"pyyaml>=3.10\" ]\n\nsetup(name=\"crow\",\n version=\"0.1.1\",\n platforms='any',\n packages = find_packages(),\n include_package_data=True,\n install_requires=requires,\n author = \"Bogdan Gaza\",\n author_email = \"bc.gaza@gmail.com\",\n url = \"https://github.com/hurrycane/crow\",\n description = \"\"\"Service ochestration based on etcd\"\"\",\n entry_points = {'console_scripts': [ 'crow-agent = crow.agent.runner:execute_from_cli' ]},\n test_requirements = [],\n classifiers = [\n \"Topic :: System :: Distributed Computing\",\n \"Topic :: Software Development :: Libraries\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Database :: Front-Ends\",\n ]\n)\n","sub_path":"pypi_install_script/crow-0.1.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"536280185","text":"T = int(input())\nfor tc in range(1, T+1):\n N, M = map(int, input().split())\n member = list(range(1, N+1))\n group = []\n result = 0\n for _ in range(M):\n a = list(map(int, input().split()))\n if len(a) == 2:\n if a[0] in member:\n group.append(a[0])\n if a[1] in member and a[1] not in group:\n member.remove(a[1])\n print(member, group)\n # number = group.count(1)+group.count(0)\n # if group.count(2) == 1:\n # number -= 1\n # elif group.count(2) == 1:\n # number -= 1\n \n # print('#{} {}'.format(tc, ))\n ","sub_path":"swea_study/problem/7465_ChangYoung_number_of_group.py","file_name":"7465_ChangYoung_number_of_group.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"601467039","text":"import re\n\nfrom bibchex.util import unlatexify\nfrom bibchex.strutil import merge_lines, crush_spaces, split_at_multiple\n\n# Authors and editors are handled separately\nFIELDS = ('ID', 'ENTRYTYPE', 'journal', 'comments', 'pages',\n 'abstract', 'title', 'year', 'month', 'day', 'date',\n 'volume', 'keyword', 'url', 'doi', 'booktitle',\n 'publisher', 'number', 'isbn', 'issn', 'address',\n 'edition', 'organization')\nUNLATEXIFY_FIELDS = ('title', 'abstract', 'journal', 'booktitle', 'url', 'doi')\nBOOL_OPTIONS = (\"nodoi\",)\n\n\ndef parse_bool(s):\n return s.lower().strip() in (\"1\", \"true\", \"yes\")\n\n\nclass Difference(object):\n def __init__(self, entry_id, source, field, suggestion):\n self.entry_id = entry_id\n self.source = source\n self.field = field\n self.suggestion = suggestion\n\n\nclass Problem(object):\n def __init__(self, entry_id, source, problem_type, message, details):\n self.entry_id = entry_id\n self.problem_type = problem_type\n self.source = source\n self.message = message\n self.details = details\n\n\nclass Entry(object):\n DOI_RE = re.compile(r'https?://(dx\\.)?doi.org/(?P.*)')\n\n def __init__(self, bibtex_entry, ui):\n self._bentry = bibtex_entry\n self._id = bibtex_entry['ID']\n\n self.data = {}\n self.raw_data = {}\n self.options = {}\n self.authors = []\n self.editors = []\n\n self._ignore_diffs = {}\n self._ignore_problems = set()\n\n self._deduced_doi = None\n self._suggested_dois = 
[]\n\n self._parse()\n self.authors = self._parse_people('author')\n self.editors = self._parse_people('editor')\n self._parse_options()\n self._doi_from_url()\n\n self._parse_ignore_diffs()\n self._parse_ignore_problems()\n\n self._ui = ui\n\n def get_id(self):\n return self._id\n\n def get_doi(self):\n if 'doi' in self.data:\n return self.data['doi']\n\n return self._deduced_doi\n\n def get_probable_doi(self):\n if self.get_doi():\n return self.get_doi()\n\n if self._suggested_dois:\n best_doi = max(set(self._suggested_dois),\n key=self._suggested_dois.count)\n return best_doi\n\n return None\n\n def get_suggested_dois(self):\n return self._suggested_dois\n\n def add_suggested_doi(self, doi):\n self._suggested_dois.append(doi)\n\n def _doi_from_url(self):\n if 'doi' in self.data:\n return\n\n m = Entry.DOI_RE.match(self.data.get('url', \"\"))\n if not m:\n return\n\n self._deduced_doi = m.group('doi')\n\n def _parse_options(self):\n for k, v in self._bentry.items():\n if k[:len(\"bibchex-\")] == \"bibchex-\":\n if k in (\"bibchex-ignore-diffs\", \"bibchex-ignore-problems\"):\n pass # Handled separately\n option = k[len(\"bibchex-\"):].lower()\n if option in BOOL_OPTIONS:\n self.options[option] = parse_bool(v)\n else:\n self.options[option] = v\n\n def _parse_ignore_diffs(self):\n if 'bibchex-ignore-diffs' not in self._bentry:\n return\n\n ignores = self._bentry['bibchex-ignore-diffs'].split(';')\n for ignore in ignores:\n tokens = ignore.split('.')\n source = tokens[0].lower()\n\n if source not in self._ignore_diffs:\n self._ignore_diffs[source] = set()\n\n if len(tokens) > 1:\n self._ignore_diffs[source].add(tokens[1].lower())\n else:\n self._ignore_diffs[source].add(\"*\")\n\n def _parse_ignore_problems(self):\n if 'bibchex-ignore-problems' not in self._bentry:\n return\n\n self._ignore_problems = set(\n (ignore.lower() for ignore in\n self._bentry['bibchex-ignore-problems'].split(';')))\n\n def should_ignore_diff(self, source, field):\n ignores = self._ignore_diffs.get(source.lower(), set())\n return field.lower().replace(\" \", \"\") in ignores or \"*\" in ignores\n\n def should_ignore_problem(self, problem_type):\n return problem_type.lower() in self._ignore_problems\n\n def _parse_people(self, fieldname):\n # First, split the authors' names by 'and', which may not be\n # enclosed in braces\n # TODO check brace-enclosing\n\n if fieldname not in self._bentry:\n return []\n\n authors_raw = crush_spaces(merge_lines(self._bentry[fieldname]))\n authors_split = split_at_multiple(authors_raw, [' and ', ' AND '])\n result = []\n\n for author_name in authors_split:\n comma_count = author_name.count(',')\n if comma_count == 0:\n # Name is to be read literally.\n # Everything including the second-to-last capitalized\n # word becomes the first name\n words = author_name.split(' ')\n if len(words) == 1:\n # Okay, only a last name\n first_last = 0\n else:\n i = len(words) - 2\n while i > 0:\n if words[i][0].isupper():\n break\n i -= 1\n first_last = i + 1\n\n first_name = \" \".join(words[:first_last])\n last_name = \" \".join(words[first_last:])\n elif comma_count == 1:\n # Last, First format\n (last_name, first_name) = author_name.split(\",\")\n first_name = first_name.strip()\n elif comma_count == 2:\n # Special case for Doe, Jr., Jon\n (last_name, jr, first_name) = author_name.split(\",\")\n jr = jr.strip()\n first_name = first_name.strip()\n last_name = \"{}, {}\".format(last_name, jr)\n else:\n self._ui.warning(\"Entry\",\n \"Unrecognized {} format for '{}'\"\n .format(fieldname, 
author_name))\n continue\n\n result.append(\n (unlatexify(first_name), unlatexify(last_name)))\n return result\n\n def _parse(self):\n for k, v in self._bentry.items():\n if k in FIELDS:\n if k.lower() in UNLATEXIFY_FIELDS:\n self.data[k.lower()] = unlatexify(merge_lines(v))\n self.raw_data[k.lower()] = merge_lines(v)\n else:\n self.data[k.lower()] = merge_lines(v)\n\n\nclass Suggestion(object):\n KIND_PLAIN = 1\n KIND_RE = 2\n\n def __init__(self, source, entry):\n self._entry = entry\n self.data = {}\n self.source = source\n self.authors = []\n self.editors = []\n\n def get_entry(self):\n return self._entry\n\n def add_field(self, k, vs, kind=KIND_PLAIN):\n if k not in self.data:\n self.data[k] = []\n\n\n if isinstance(vs, list):\n self.data[k].extend([(str(v), kind) for v in vs])\n else:\n self.data[k].append((str(vs), kind))\n\n def add_author(self, first, last):\n self.authors.append((first, last))\n\n def add_editor(self, first, last):\n self.editors.append((first, last))\n","sub_path":"bibchex/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":7395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"418498630","text":"# This is a test project to start analyzing data.\r\n\r\n# 1. Importing modules\r\nimport pandas as pd\r\n# matplotlib inline\r\nimport matplotlib.pyplot as plt\r\nplt.style.use('seaborn-whitegrid')\r\nimport numpy as np\r\n\r\n# 2. Accessing and saving PriceData ---------------------------------------\r\n# Data is saved as a CSV file and could be loaded each time.\r\n# However reading the entire CSV files takes about 3.5s.\r\n# Creating a pickle, and loading that pickle, reduces loading time to 1.5s.\r\n\r\n# PriceData = pd.read_csv('EURGBP_H4_1993TO2018.csv')\r\n# PriceData.to_pickle('EURGBP_H4_1993TO2018')\r\nPriceData = pd.read_pickle('EURGBP_M15_2016')\r\n\r\n# 3. 
Generating EMAs columns.\r\nStrategyData = pd.DataFrame([])\r\nStrategyData['MA05'] = ''\r\nStrategyData['MA10'] = ''\r\nStrategyData['MA15'] = ''\r\nStrategyData['MA20'] = ''\r\nStrategyData['MA25'] = ''\r\nStrategyData['MA30'] = ''\r\nStrategyData['MA35'] = ''\r\nStrategyData['MA40'] = ''\r\nStrategyData['MA45'] = ''\r\nStrategyData['MA50'] = ''\r\n\r\n# Calculating the EMAs for different timeframes.\r\nStrategyData['MA05'] = PriceData.ewm(span=5, adjust=False)[''].mean()\r\nStrategyData['MA10'] = PriceData.ewm(span=10, adjust=False)[''].mean()\r\nStrategyData['MA15'] = PriceData.ewm(span=15, adjust=False)[''].mean()\r\nStrategyData['MA20'] = PriceData.ewm(span=20, adjust=False)[''].mean()\r\nStrategyData['MA25'] = PriceData.ewm(span=25, adjust=False)[''].mean()\r\nStrategyData['MA30'] = PriceData.ewm(span=30, adjust=False)[''].mean()\r\nStrategyData['MA35'] = PriceData.ewm(span=35, adjust=False)[''].mean()\r\nStrategyData['MA40'] = PriceData.ewm(span=40, adjust=False)[''].mean()\r\nStrategyData['MA45'] = PriceData.ewm(span=45, adjust=False)[''].mean()\r\nStrategyData['MA50'] = PriceData.ewm(span=50, adjust=False)[''].mean()\r\n\r\n\r\n# Calculating the Variance of the data\r\nFinalData = pd.DataFrame([])\r\nFinalData['NormalData'] = StrategyData['MA05']\r\nFinalData['Velocity'] = ((StrategyData['MA05'] - StrategyData['MA05'].shift(1)) + (StrategyData['MA10'] - StrategyData['MA10'].shift(1))\r\n + (StrategyData['MA15'] - StrategyData['MA15'].shift(1)) + (StrategyData['MA20'] - StrategyData['MA20'].shift(1))\r\n + (StrategyData['MA25'] - StrategyData['MA25'].shift(1)) + (StrategyData['MA30'] - StrategyData['MA30'].shift(1))\r\n + (StrategyData['MA35'] - StrategyData['MA35'].shift(1)) + (StrategyData['MA40'] - StrategyData['MA40'].shift(1))\r\n + (StrategyData['MA40'] - StrategyData['MA40'].shift(1)) + (StrategyData['MA50'] - StrategyData['MA50'].shift(1))) / 10 * 100000\r\nFinalData['VelocityAveraged'] = (FinalData.ewm(span=25, adjust=False)['Velocity']).mean()\r\nFinalData['Acceleration'] = FinalData['VelocityAveraged'] - FinalData['VelocityAveraged'].shift(1)\r\nFinalData['AccelerationAveraged'] = (FinalData.ewm(span=25, adjust=False)['Acceleration']).mean()\r\n\r\n\r\n# fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, sharex=True)\r\n# PlotData1 = FinalData['VelocityAveraged'].fillna(0)\r\n# PlotData2 = FinalData['AccelerationAveraged'].fillna(0)\r\n# ax1.plot(PriceData[''].iloc[3300: 3500], 'b')\r\n# ax2.plot(PlotData1.iloc[3300: 3500], 'r')\r\n# ax3.plot(PlotData2.iloc[3300: 3500], 'g')\r\n\r\n# plt.show()\r\n\r\nFinalData['StandardDeviation'] = 0\r\n\r\nfig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=False)\r\nPlotData1 = FinalData['VelocityAveraged'].fillna(0)\r\nfor i in range(500, FinalData['VelocityAveraged'].size):\r\n FinalData['StandardDeviation'].at[i] = FinalData['VelocityAveraged'].iloc[(i - 500):i].std()\r\n\r\nax1.hist(PlotData1, bins=50)\r\nax2.plot(FinalData['StandardDeviation'])\r\n\r\nplt.show()\r\n\r\n# print(FinalData.iloc[100:1000])\r\n","sub_path":"MA-Analysis/MultipleMAs_Analysis.py","file_name":"MultipleMAs_Analysis.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"43725238","text":"import json\r\nimport pickle\r\n\r\nfrom django.shortcuts import render\r\nfrom django.shortcuts import HttpResponse, redirect\r\nfrom django.views import View\r\nfrom django.core.cache import cache\r\nfrom django_redis import get_redis_connection\r\n\r\n\r\nfrom 
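# The per-row .at loop above fills StandardDeviation by slicing 500 bars at a
# time; pandas computes the same thing in one vectorized call. The loop's
# window for row i ends at i-1, so an exact match needs .shift(1) (a sketch,
# same column names as above):
FinalData['StandardDeviation'] = (
    FinalData['VelocityAveraged'].rolling(window=500).std().shift(1).fillna(0)
)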
apps.orders.models import Order, OrderGoods\r\nfrom apps.goods.models import Goods\r\nfrom apps.users.models import Address\r\n\r\n\r\nclass OrderCache(View):\r\n def post(self, request):\r\n print('cache')\r\n cache.set(request.user.username, request.POST['goods'])\r\n return HttpResponse('order cache')\r\n\r\n\r\nclass OrderCommit(View):\r\n def post(self, request):\r\n order_str = cache.get(request.user.username)\r\n goods_list = json.loads(order_str)\r\n address = Address.objects.get(addressId=request.POST['addressId'])\r\n total = sum([int(i['total']) for i in goods_list])\r\n order = Order(user=request.user, receiver=address.realName, phone=address.phone,\r\n address=address.address, status='付款', total=total)\r\n order.save()\r\n for oder_item in goods_list:\r\n goods = Goods.objects.get(skuId=oder_item['skuId'], is_limit=False)\r\n goods_order = OrderGoods(order=order, goods=goods,\r\n num=oder_item['num'], total=oder_item['total'])\r\n goods_order.save()\r\n return redirect('/order.html')\r\n\r\n\r\nclass LimitOrderCommit(View):\r\n def post(self, request):\r\n conn = get_redis_connection('default')\r\n order_str = cache.get(request.user.username)\r\n if not order_str:\r\n return HttpResponse('请重新提交订单')\r\n goods_list = json.loads(order_str)\r\n skuId = goods_list[0].get('skuId')\r\n goods_key = 'flashSale_Goods:%s' % skuId\r\n if conn.exists(goods_key):\r\n\r\n goods_bytes = conn.hgetall(goods_key)\r\n goods = pickle.loads(list(goods_bytes)[0])\r\n print(type(goods_bytes))\r\n pipe = conn.pipeline()\r\n pipe.watch(goods_key)\r\n if int(pipe.hget(goods_key, list(goods_bytes)[0])) > 0:\r\n pipe.hincrby(goods_key, list(goods_bytes)[0])\r\n else:\r\n HttpResponse('已经抢光')\r\n order = Order(user=request.user, status='未付款', total=int(goods.price))\r\n order.save()\r\n pipe.execute()\r\n return HttpResponse('提交成功')\r\n else:\r\n return HttpResponse('还没开始')\r\n\r\n\r\nclass OderView(View):\r\n def get(self, request):\r\n order_list = Order.objects.filter(user=request.user)\r\n return render(request, 'order.html', {'order_list': order_list})\r\n\r\n\r\nclass OrderDatailView(View):\r\n def get(self, request):\r\n order = Order.objects.get(orderId=request.GET['id'])\r\n print(order)\r\n return render(request, 'order_details.html', {'order': order})\r\n\r\n\r\nclass CheckOut(View):\r\n def get(self, request):\r\n order_tmp = cache.get(request.user.username)\r\n try:\r\n goods_list = json.loads(str(order_tmp))\r\n total = sum([int(i['total']) for i in goods_list])\r\n except TypeError as e:\r\n print(e)\r\n return redirect('/cart.html')\r\n is_limit = goods_list[0].get('is_limit')\r\n return render(request, 'checkout.html', {'goods_list': goods_list, 'total': total, 'is_limit': is_limit})\r\n","sub_path":"apps/orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"71188738","text":"import sys\n\nfrom setuptools import setup\n\ntry:\n from setuptools_rust import Binding, RustExtension\nexcept ImportError:\n import subprocess\n\n errno = subprocess.call(\n [sys.executable, \"-m\", \"pip\", \"install\", \"setuptools-rust\"])\n if errno:\n print(\"Please install setuptools-rust package\")\n raise SystemExit(errno)\n else:\n from setuptools_rust import Binding, RustExtension\n\nsetup(\n name='py-sourcemap',\n version='0.1',\n rust_extensions=[\n RustExtension(\n 'py_sourcemap.py_sourcemap', 'Cargo.toml', binding=Binding.PyO3)\n ],\n packages=['py_sourcemap'],\n 
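# Note on LimitOrderCommit above: redis-py pipelines run in immediate mode
# after watch() until multi() is called, so the hincrby there is not actually
# guarded by the watch. A minimal sketch of the documented watch/multi/exec
# idiom for a check-and-decrement; the 'stock' field name is an assumption
# (the real hash field in the code above is a pickled key).
import redis

def reserve_one(conn, goods_key, field='stock'):
    with conn.pipeline() as pipe:
        while True:
            try:
                pipe.watch(goods_key)            # immediate mode from here
                stock = int(pipe.hget(goods_key, field) or 0)
                if stock <= 0:
                    pipe.unwatch()
                    return False                 # sold out
                pipe.multi()                     # start queuing commands
                pipe.hincrby(goods_key, field, -1)
                pipe.execute()                   # raises if goods_key changed
                return True
            except redis.WatchError:
                continue                         # lost the race; retry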
setup_requires=['setuptools_rust>=0.10.2'],\n # rust extensions are not zip safe, just like C-extensions.\n zip_safe=False)\n","sub_path":"compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"543679680","text":"import requests\n\nimport settings\n\n\ndef champion(champion_id=\"\"):\n url = \"/\".join([settings.API_ENDPOINT,\n \"api/lol\",\n settings.API_REGION,\n \"v1.2\",\n \"champion\",\n str(champion_id)])\n url = url + \"?api_key={api_key}\".format(api_key=settings.API_DEV_KEY)\n response = requests.get(url)\n\n return response\n\n\nif __name__ == \"__main__\":\n print(champion())\n print(champion(1))\n","sub_path":"champions.py","file_name":"champions.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"177157674","text":"#! /usr/bin/env python3\n\n#from tools.wordProcessor import *\nimport regex\nfrom tools.wordProcessor import WordProcessor as WP\nfrom tools.ngram import NGram as NG\n\n\ndef main():\n\tlang=\"english\"\n\tdatapkg = \"corpus\"\n\tbook = \"eng/myBigErrorsList.txt\"\n\t\n\tdata = WP.readBook(WP, datapkg, book)\n\t#words = regex.split(\"(\\n+)\", data.lower())\n\twords = regex.split(\"(\\n+)\", data)\n\tng = NG(lang)\n\tcletter, n =\"\", 0;\n\tfor word in words:\n\t\tif \"\\n\" in word:\n\t\t\tcletter += str('\\n')\n\t\telse:\n\t\t\tfor w in regex.split(\"\\W+\", word):\n\t\t\t\tif len(w):\n\t\t\t\t\tn +=1\n\t\t\t\t\tprint(\"correct(%r) => %r\" % (w, ng.correct(w.lower())))\n\t\t\t\t\tcletter += str(ng.correct(w) + str(\" \"))\n \n\tprint(\"######## Original Txt ########\")\n\tprint(data)\n\tprint(\"######## Txt After Correction ########\")\n\tprint(cletter)\n\tprint(\"################\")\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"filecorct.py","file_name":"filecorct.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"105304498","text":"from pulsar.utils.pep import iteritems, is_string\n\nfrom lux import Html\n\n\ncontent_types = {}\n\n\ndef apply_content(html, content, context):\n if content.content_type in content_types:\n handler = content_types[content.content_type]\n else:\n handler = content_types['contenttype']\n handler(html, content, context)\n\n\nclass ContentTypeMeta(type):\n '''\n Just a metaclass to differentiate plugins from other classes\n '''\n def __new__(cls, name, bases, attrs):\n new_class = super(ContentTypeMeta, cls).__new__\n if attrs.pop('abstract', None):\n return new_class(cls, name, bases, attrs)\n pname = (attrs.get('name') or name).lower()\n pcls = new_class(cls, name, bases, attrs)\n content_types[pname] = pcls()\n return pcls\n\n\nclass ContentType(ContentTypeMeta('ContentTypeBase', (), {'abstract': True})):\n\n def __call__(self, html, fields, context):\n html.data('fields', len(fields))\n for name, value in iteritems(fields):\n if is_string(value):\n html.append(Html('div', value, field=name))\n else:\n html.append(Html('div', field=name, value=value))\n\n\nclass ContentUrl(ContentType):\n\n def __call__(self, html, fields, context):\n if fields.get('content_url') == 'this':\n html.data('fields', 0)\n html.append(context.get('this'))\n else:\n super(ContentUrl, self).__call__(html, fields, context)\n\n\nclass DataTable(ContentType):\n\n def __call__(self, html, fields, context):\n html.data('fields', 0)\n data 
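# Sketch for champions.py above: requests can build and URL-encode the query
# string itself, which avoids hand-formatting the api_key parameter (same
# settings names and endpoint layout as the original):
def champion_v2(champion_id=""):
    url = "/".join([settings.API_ENDPOINT, "api/lol", settings.API_REGION,
                    "v1.2", "champion", str(champion_id)])
    return requests.get(url, params={"api_key": settings.API_DEV_KEY})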
= {'col-headers': fields.get('fields'),\n 'ajax-url': fields.get('url')}\n html.append(Html('div', cn='datagrid', data=data))\n","sub_path":"lux/extensions/cms/contents.py","file_name":"contents.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"285566637","text":"import click\nimport os\nimport yaml\nimport gzip\nimport shutil\n\n@click.group()\ndef cli():\n pass\n\n@cli.command()\n@click.argument(\"merger\", type=click.Path(exists=True))\n@click.argument(\"merge_into\", type=click.Path(exists=True))\ndef merge(merger, merge_into):\n append_zip_into_zip(merger, merge_into)\n\ndef dataset_id_to_path(dataset_id, groups_dir):\n paint_dir = dataset_id.split(\".\")[0]\n return os.path.abspath(os.path.join(groups_dir, paint_dir, \"{}.gz\".format(dataset_id)))\n\ndef merges_into_path(merges_into, groups_dir):\n merge_into_gaf = \"{}.gaf.gz\".format(merges_into)\n return os.path.abspath(os.path.join(groups_dir, merges_into, merge_into_gaf))\n\ndef append_zip_into_zip(merger, merge_into):\n if not os.path.exists(merger):\n click.echo(click.style(\"{} does not exist, skipping\".format(merger), fg=\"red\"), err=True)\n return\n\n if not os.path.exists(merge_into):\n click.echo(click.style(\"{} does not exist, skipping\".format(merge_into), fg=\"red\"), err=True)\n return\n\n base, leaf = os.path.split(merge_into)\n merged_leaf = leaf.split(\".gaf.gz\")[0] + \"_merged.gaf.gz\"\n final = os.path.join(base, merged_leaf)\n\n final_f = open(final, \"wb\")\n final_zip = gzip.GzipFile(\"\", mode=\"wb\", fileobj=final_f)\n merger_zip = gzip.GzipFile(merger)\n merge_into_zip = gzip.GzipFile(merge_into)\n\n final_zip.write(merge_into_zip.read())\n final_zip.write(merger_zip.read())\n\n\nif __name__ == \"__main__\":\n cli()\n","sub_path":"pipeline/util/paint_merge.py","file_name":"paint_merge.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"471437807","text":"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\nimport os\nimport random\nimport unittest\nimport json\nimport warnings\nimport functools\nfrom monty.os.path import which\nfrom pymatgen import Lattice, PeriodicSite, Element\nfrom monty.json import MontyDecoder\nfrom pymatgen.io.vasp.inputs import Poscar\nfrom pymatgen.transformations.standard_transformations import *\nfrom pymatgen.symmetry.structure import SymmetrizedStructure\n\n'''\nCreated on Sep 23, 2011\n'''\n\n__author__ = \"Shyue Ping Ong\"\n__copyright__ = \"Copyright 2011, The Materials Project\"\n__version__ = \"0.1\"\n__maintainer__ = \"Shyue Ping Ong\"\n__email__ = \"shyuep@gmail.com\"\n__date__ = \"Sep 23, 2011\"\n\n\ntest_dir = os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"..\",\n 'test_files')\n\nenumlib_present = which('enum.x') and which('makestr.x')\n\n\nclass RotationTransformationsTest(unittest.TestCase):\n def setUp(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.5, 0.75])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n self.struct = Structure(lattice, [\"Si\"] * 2, coords)\n\n def test_as_from_dict(self):\n t = RotationTransformation([0, 1, 0], 30, False)\n d = t.as_dict()\n self.assertEqual(type(RotationTransformation.from_dict(d)),\n RotationTransformation)\n\n def 
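# Note on paint_merge.py above: gzip streams may contain multiple members, so
# concatenating the raw .gz bytes already yields a valid archive that gzip
# readers (including Python's gzip module) accept -- the decompress/recompress
# round trip through GzipFile can be skipped. A sketch with an explicit output
# path (the original derives its 'final' path from merge_into):
import shutil

def append_gzip_raw(merger, merge_into, final):
    with open(final, "wb") as out:
        for path in (merge_into, merger):
            with open(path, "rb") as src:
                shutil.copyfileobj(src, out)  # copy compressed bytes as-is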
test_rotation_transformation(self):\n t = RotationTransformation([0, 1, 0], 30, False)\n s2 = t.apply_transformation(self.struct)\n s1 = t.inverse.apply_transformation(s2)\n self.assertTrue((abs(s1.lattice.matrix - self.struct.lattice.matrix)\n < 1e-8).all())\n\n\nclass RemoveSpeciesTransformationTest(unittest.TestCase):\n def test_apply_transformation(self):\n t = RemoveSpeciesTransformation([\"Li+\"])\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.75, 0.75])\n coords.append([0.5, 0.5, 0.5])\n coords.append([0.25, 0.25, 0.25])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n struct = Structure(lattice, [\"Li+\", \"Li+\", \"O2-\", \"O2-\"], coords)\n s = t.apply_transformation(struct)\n self.assertEqual(s.composition.formula, \"O2\")\n\n d = t.as_dict()\n self.assertEqual(type(RemoveSpeciesTransformation.from_dict(d)),\n RemoveSpeciesTransformation)\n\n\nclass SubstitutionTransformationTest(unittest.TestCase):\n def test_apply_transformation(self):\n t = SubstitutionTransformation({\"Li+\": \"Na+\", \"O2-\": \"S2-\"})\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.75, 0.75])\n coords.append([0.5, 0.5, 0.5])\n coords.append([0.25, 0.25, 0.25])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n struct = Structure(lattice, [\"Li+\", \"Li+\", \"O2-\", \"O2-\"], coords)\n s = t.apply_transformation(struct)\n self.assertEqual(s.composition.formula, \"Na2 S2\")\n\n def test_fractional_substitution(self):\n t = SubstitutionTransformation({\"Li+\": \"Na+\",\n \"O2-\": {\"S2-\": 0.5, \"Se2-\": 0.5}})\n # test the to and from dict on the nested dictionary\n t = SubstitutionTransformation.from_dict(t.as_dict())\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.75, 0.75])\n coords.append([0.5, 0.5, 0.5])\n coords.append([0.25, 0.25, 0.25])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n struct = Structure(lattice, [\"Li+\", \"Li+\", \"O2-\", \"O2-\"], coords)\n s = t.apply_transformation(struct)\n self.assertEqual(s.composition.formula, \"Na2 Se1 S1\")\n\n\nclass SupercellTransformationTest(unittest.TestCase):\n def setUp(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.75, 0.75])\n coords.append([0.5, 0.5, 0.5])\n coords.append([0.25, 0.25, 0.25])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n self.struct = Structure(lattice, [\"Li+\", \"Li+\", \"O2-\", \"O2-\"], coords)\n\n def test_apply_transformation(self):\n t = SupercellTransformation([[2, 1, 0], [0, 2, 0], [1, 0, 2]])\n s = t.apply_transformation(self.struct)\n self.assertEqual(s.composition.formula, \"Li16 O16\")\n\n def test_from_scaling_factors(self):\n scale_factors = [random.randint(1, 5) for i in range(3)]\n t = SupercellTransformation.from_scaling_factors(*scale_factors)\n s = t.apply_transformation(self.struct)\n self.assertEqual(s.num_sites,\n 4 * functools.reduce(lambda a, b: a * b,\n scale_factors))\n\n\nclass OxidationStateDecorationTransformationTest(unittest.TestCase):\n def test_apply_transformation(self):\n t = OxidationStateDecorationTransformation({\"Li\": 1, \"O\": -2})\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.75, 0.75])\n coords.append([0.5, 0.5, 0.5])\n coords.append([0.25, 0.25, 0.25])\n 
lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n struct = Structure(lattice, [\"Li\", \"Li\", \"O\", \"O\"], coords)\n s = t.apply_transformation(struct)\n self.assertEqual(s[0].species_string, \"Li+\")\n self.assertEqual(s[2].species_string, \"O2-\")\n d = t.as_dict()\n self.assertEqual(\n type(OxidationStateDecorationTransformation.from_dict(d)),\n OxidationStateDecorationTransformation)\n\n\nclass AutoOxiStateDecorationTransformationTest(unittest.TestCase):\n def test_apply_transformation(self):\n p = Poscar.from_file(os.path.join(test_dir, 'POSCAR.LiFePO4'),\n check_for_POTCAR=False)\n t = AutoOxiStateDecorationTransformation()\n s = t.apply_transformation(p.structure)\n expected_oxi = {\"Li\": 1, \"P\": 5, \"O\": -2, \"Fe\": 2}\n for site in s:\n self.assertEqual(site.specie.oxi_state,\n expected_oxi[site.specie.symbol])\n\n def test_as_from_dict(self):\n t = AutoOxiStateDecorationTransformation()\n d = t.as_dict()\n t = AutoOxiStateDecorationTransformation.from_dict(d)\n self.assertEqual(t.analyzer.dist_scale_factor, 1.015)\n\n\nclass OxidationStateRemovalTransformationTest(unittest.TestCase):\n def test_apply_transformation(self):\n t = OxidationStateRemovalTransformation()\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.75, 0.75])\n coords.append([0.5, 0.5, 0.5])\n coords.append([0.25, 0.25, 0.25])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n struct = Structure(lattice, [\"Li+\", \"Li+\", \"O2-\", \"O2-\"], coords)\n s = t.apply_transformation(struct)\n self.assertEqual(s[0].species_string, \"Li\")\n self.assertEqual(s[2].species_string, \"O\")\n\n d = t.as_dict()\n self.assertEqual(type(OxidationStateRemovalTransformation.from_dict(d)),\n OxidationStateRemovalTransformation)\n\n\n@unittest.skipIf(not enumlib_present, \"enum_lib not present.\")\nclass PartialRemoveSpecieTransformationTest(unittest.TestCase):\n\n def setUp(self):\n warnings.simplefilter(\"ignore\")\n\n def tearDown(self):\n warnings.simplefilter(\"default\")\n\n def test_apply_transformation(self):\n t = PartialRemoveSpecieTransformation(\"Li+\", 1.0 / 3, 3)\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.75, 0.75])\n coords.append([0.5, 0.5, 0.5])\n coords.append([0.25, 0.25, 0.25])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n struct = Structure(lattice, [\"Li+\", \"Li+\", \"Li+\", \"O2-\"], coords)\n self.assertEqual(len(t.apply_transformation(struct, 100)), 2)\n\n d = t.as_dict()\n self.assertEqual(type(PartialRemoveSpecieTransformation.from_dict(d)),\n PartialRemoveSpecieTransformation)\n\n def test_apply_transformation_fast(self):\n t = PartialRemoveSpecieTransformation(\"Li+\", 0.5)\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.75, 0.75])\n coords.append([0.5, 0.5, 0.5])\n coords.append([0.25, 0.25, 0.25])\n coords.append([0.1, 0.1, 0.1])\n coords.append([0.3, 0.75, 0.3])\n lattice = Lattice([[10, 0.00, 0.00], [0, 10, 0.00], [0.00, 0, 10]])\n struct = Structure(lattice, [\"Li+\"] * 6, coords)\n fast_opt_s = t.apply_transformation(struct)\n t = PartialRemoveSpecieTransformation(\"Li+\", 0.5, PartialRemoveSpecieTransformation.ALGO_COMPLETE)\n slow_opt_s = t.apply_transformation(struct)\n self.assertAlmostEqual(EwaldSummation(fast_opt_s).total_energy,\n EwaldSummation(slow_opt_s).total_energy, 4)\n 
self.assertEqual(fast_opt_s, slow_opt_s)\n\n def test_apply_transformations_complete_ranking(self):\n p = Poscar.from_file(os.path.join(test_dir, 'POSCAR.LiFePO4'),\n check_for_POTCAR=False)\n t1 = OxidationStateDecorationTransformation({\"Li\": 1, \"Fe\": 2, \"P\": 5,\n \"O\": -2})\n s = t1.apply_transformation(p.structure)\n t = PartialRemoveSpecieTransformation(\"Li+\", 0.5, PartialRemoveSpecieTransformation.ALGO_COMPLETE)\n self.assertEqual(len(t.apply_transformation(s, 10)), 6)\n\n def test_apply_transformations_best_first(self):\n p = Poscar.from_file(os.path.join(test_dir, 'POSCAR.LiFePO4'),\n check_for_POTCAR=False)\n t1 = OxidationStateDecorationTransformation({\"Li\": 1, \"Fe\": 2, \"P\": 5,\n \"O\": -2})\n s = t1.apply_transformation(p.structure)\n t = PartialRemoveSpecieTransformation(\"Li+\", 0.5,\n PartialRemoveSpecieTransformation.ALGO_BEST_FIRST)\n self.assertEqual(len(t.apply_transformation(s)), 26)\n\n\nclass OrderDisorderedStructureTransformationTest(unittest.TestCase):\n def test_apply_transformation(self):\n t = OrderDisorderedStructureTransformation()\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.75, 0.75])\n coords.append([0.5, 0.5, 0.5])\n coords.append([0.25, 0.25, 0.25])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n\n struct = Structure(lattice, [{\"Si4+\": 0.5, \"O2-\": 0.25, \"P5+\": 0.25},\n {\"Si4+\": 0.5, \"O2-\": 0.25, \"P5+\": 0.25},\n {\"Si4+\": 0.5, \"O2-\": 0.25, \"P5+\": 0.25},\n {\"Si4+\": 0.5, \"O2-\": 0.25, \"P5+\": 0.25}],\n coords)\n output = t.apply_transformation(struct, return_ranked_list=50)\n self.assertEqual(len(output), 12)\n self.assertIsInstance(output[0]['structure'], Structure)\n\n struct = Structure(lattice, [{\"Si4+\": 0.5}, {\"Si4+\": 0.5},\n {\"P5+\": 0.5, \"O2-\": 0.5},\n {\"P5+\": 0.5, \"O2-\": 0.5}],\n coords)\n output = t.apply_transformation(struct, return_ranked_list=50)\n self.assertIsInstance(output, list)\n self.assertEqual(len(output), 4)\n self.assertEqual(t.lowest_energy_structure, output[0]['structure'])\n\n struct = Structure(lattice, [{\"Si4+\": 0.5}, {\"Si4+\": 0.5}, {\"O2-\": 0.5},\n {\"O2-\": 0.5}], coords)\n allstructs = t.apply_transformation(struct, 50)\n self.assertEqual(len(allstructs), 4)\n\n struct = Structure(lattice, [{\"Si4+\": 0.333}, {\"Si4+\": 0.333},\n {\"Si4+\": 0.333}, \"O2-\"], coords)\n allstructs = t.apply_transformation(struct, 50)\n self.assertEqual(len(allstructs), 3)\n\n d = t.as_dict()\n self.assertEqual(\n type(OrderDisorderedStructureTransformation.from_dict(d)),\n OrderDisorderedStructureTransformation)\n\n def test_no_oxidation(self):\n specie = {\"Cu1+\": 0.5, \"Au2+\": 0.5}\n cuau = Structure.from_spacegroup(\"Fm-3m\", Lattice.cubic(3.677),\n [specie], [[0, 0, 0]])\n trans = OrderDisorderedStructureTransformation()\n ss = trans.apply_transformation(cuau, return_ranked_list=100)\n self.assertEqual(ss[0][\"structure\"].composition[\"Cu+\"], 2)\n trans = OrderDisorderedStructureTransformation(no_oxi_states=True)\n ss = trans.apply_transformation(cuau, return_ranked_list=100)\n self.assertEqual(ss[0][\"structure\"].composition[\"Cu+\"], 0)\n self.assertEqual(ss[0][\"structure\"].composition[\"Cu\"], 2)\n\n def test_symmetrized_structure(self):\n t = OrderDisorderedStructureTransformation(symmetrized_structures=True)\n c = []\n sp = []\n c.append([0.5, 0.5, 0.5])\n sp.append('Si4+')\n c.append([0.45, 0.45, 0.45])\n sp.append({\"Si4+\": 0.5})\n c.append([0.56, 0.56, 0.56])\n 
sp.append({\"Si4+\": 0.5})\n c.append([0.25, 0.75, 0.75])\n sp.append({\"Si4+\": 0.5})\n c.append([0.75, 0.25, 0.25])\n sp.append({\"Si4+\": 0.5})\n l = Lattice.cubic(5)\n s = Structure(l, sp, c)\n test_site = PeriodicSite(\"Si4+\", c[2], l)\n s = SymmetrizedStructure(s, 'not_real', [0, 1, 1, 2, 2],\n [\"a\", \"b\", \"b\", \"c\", \"c\"])\n output = t.apply_transformation(s)\n self.assertTrue(test_site in output.sites)\n\n def test_too_small_cell(self):\n t = OrderDisorderedStructureTransformation()\n coords = list()\n coords.append([0.5, 0.5, 0.5])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n struct = Structure(lattice, [{\"X4+\": 0.33, \"O2-\": 0.33, \"P5+\": 0.33}],\n coords)\n self.assertRaises(ValueError, t.apply_transformation, struct)\n\n def test_best_first(self):\n t = OrderDisorderedStructureTransformation(algo=2)\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.75, 0.75])\n coords.append([0.5, 0.5, 0.5])\n coords.append([0.25, 0.25, 0.25])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n\n struct = Structure(lattice, [{\"Si4+\": 0.5, \"O2-\": 0.25, \"P5+\": 0.25},\n {\"Si4+\": 0.5, \"O2-\": 0.25, \"P5+\": 0.25},\n {\"Si4+\": 0.5, \"O2-\": 0.25, \"P5+\": 0.25},\n {\"Si4+\": 0.5, \"O2-\": 0.25, \"P5+\": 0.25}],\n coords)\n output = t.apply_transformation(struct, return_ranked_list=3)\n self.assertAlmostEqual(output[0]['energy'], -234.57813667648315, 4)\n\n\nclass PrimitiveCellTransformationTest(unittest.TestCase):\n def test_apply_transformation(self):\n t = PrimitiveCellTransformation()\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.375, 0.375, 0.375])\n coords.append([.5, .5, .5])\n coords.append([0.875, 0.875, 0.875])\n coords.append([0.125, 0.125, 0.125])\n coords.append([0.25, 0.25, 0.25])\n coords.append([0.625, 0.625, 0.625])\n coords.append([0.75, 0.75, 0.75])\n\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n struct = Structure(lattice, [\"Li+\", \"Li+\", \"Li+\", \"Li+\",\n \"O2-\", \"O2-\", \"O2-\", \"O2-\"],\n coords)\n s = t.apply_transformation(struct)\n self.assertEqual(len(s), 4)\n\n with open(os.path.join(test_dir, \"TiO2_super.json\")) as f:\n s = json.load(f, cls=MontyDecoder)\n prim = t.apply_transformation(s)\n self.assertEqual(prim.formula, \"Ti4 O8\")\n\n d = t.as_dict()\n self.assertEqual(type(PrimitiveCellTransformation.from_dict(d)),\n PrimitiveCellTransformation)\n\n\nclass ConventionalCellTransformationTest(unittest.TestCase):\n def test_apply_transformation(self):\n t = ConventionalCellTransformation()\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.75, 0.75])\n coords.append([0.5, 0.5, 0.5])\n coords.append([0.25, 0.25, 0.25])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n struct = Structure(lattice, [\"Li+\", \"Li+\", \"O2-\", \"O2-\"], coords)\n conventional_struct = t.apply_transformation(struct)\n self.assertEqual(conventional_struct.lattice.alpha, 90)\n\n\nclass PerturbStructureTransformationTest(unittest.TestCase):\n def test_apply_transformation(self):\n t = PerturbStructureTransformation(0.05)\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.375, 0.375, 0.375])\n coords.append([.5, .5, .5])\n coords.append([0.875, 0.875, 0.875])\n 
coords.append([0.125, 0.125, 0.125])\n coords.append([0.25, 0.25, 0.25])\n coords.append([0.625, 0.625, 0.625])\n coords.append([0.75, 0.75, 0.75])\n\n lattice = [[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]]\n struct = Structure(lattice, [\"Li+\", \"Li+\", \"Li+\", \"Li+\",\n \"O2-\", \"O2-\", \"O2-\", \"O2-\"], coords)\n transformed_s = t.apply_transformation(struct)\n for i, site in enumerate(transformed_s):\n self.assertAlmostEqual(site.distance(struct[i]), 0.05)\n\n d = t.as_dict()\n self.assertEqual(type(PerturbStructureTransformation.from_dict(d)),\n PerturbStructureTransformation)\n\n\nclass DeformStructureTransformationTest(unittest.TestCase):\n def test_apply_transformation(self):\n t = DeformStructureTransformation([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0.05, 1.]])\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.375, 0.375, 0.375])\n coords.append([.5, .5, .5])\n coords.append([0.875, 0.875, 0.875])\n coords.append([0.125, 0.125, 0.125])\n coords.append([0.25, 0.25, 0.25])\n coords.append([0.625, 0.625, 0.625])\n coords.append([0.75, 0.75, 0.75])\n\n lattice = [[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]]\n struct = Structure(lattice, [\"Li+\", \"Li+\", \"Li+\", \"Li+\",\n \"O2-\", \"O2-\", \"O2-\", \"O2-\"], coords)\n transformed_s = t.apply_transformation(struct)\n self.assertAlmostEqual(transformed_s.lattice.a, 3.84019793)\n self.assertAlmostEqual(transformed_s.lattice.b, 3.84379750)\n self.assertAlmostEqual(transformed_s.lattice.c, 3.75022981)\n\n d = json.loads(json.dumps(t.as_dict()))\n self.assertEqual(type(DeformStructureTransformation.from_dict(d)),\n DeformStructureTransformation)\n\n\nclass DiscretizeOccupanciesTransformationTest(unittest.TestCase):\n\n def test_apply_transformation(self):\n l = Lattice.cubic(4)\n s_orig = Structure(l, [{\"Li\": 0.19, \"Na\": 0.19, \"K\": 0.62}, {\"O\": 1}],\n [[0, 0, 0], [0.5, 0.5, 0.5]])\n dot = DiscretizeOccupanciesTransformation(max_denominator=5, tol=0.5)\n s = dot.apply_transformation(s_orig)\n self.assertEqual(dict(s[0].species), {Element(\"Li\"): 0.2,\n Element(\"Na\"): 0.2,\n Element(\"K\"): 0.6})\n\n dot = DiscretizeOccupanciesTransformation(max_denominator=5, tol=0.01)\n self.assertRaises(RuntimeError, dot.apply_transformation, s_orig)\n\n s_orig_2 = Structure(l, [{\"Li\": 0.5, \"Na\": 0.25, \"K\": 0.25}, {\"O\": 1}],\n [[0, 0, 0], [0.5, 0.5, 0.5]])\n\n dot = DiscretizeOccupanciesTransformation(max_denominator=9, tol=0.25,\n fix_denominator=False)\n\n s = dot.apply_transformation(s_orig_2)\n self.assertEqual(dict(s[0].species), {Element(\"Li\"): Fraction(1/2),\n Element(\"Na\"): Fraction(1/4),\n Element(\"K\"): Fraction(1/4)})\n\n dot = DiscretizeOccupanciesTransformation(max_denominator=9, tol=0.05,\n fix_denominator=True)\n self.assertRaises(RuntimeError, dot.apply_transformation, s_orig_2)\n\n\nclass ChargedCellTransformationTest(unittest.TestCase):\n\n def test_apply_transformation(self):\n l = Lattice.cubic(4)\n s_orig = Structure(l, [{\"Li\": 0.19, \"Na\": 0.19, \"K\": 0.62}, {\"O\": 1}],\n [[0, 0, 0], [0.5, 0.5, 0.5]])\n cct = ChargedCellTransformation(charge=3)\n s = cct.apply_transformation(s_orig)\n self.assertEqual(s.charge, 3)\n\n\nclass ScaleToRelaxedTransformationTest(unittest.TestCase):\n\n def test_apply_transformation(self):\n\n # Test on slab relaxation where volume is fixed\n f = os.path.join(test_dir, \"surface_tests\")\n Cu_fin = Structure.from_file(os.path.join(f, 
'Cu_slab_fin.cif'))\n Cu_init = Structure.from_file(os.path.join(f, 'Cu_slab_init.cif'))\n slab_scaling = ScaleToRelaxedTransformation(Cu_init, Cu_fin)\n Au_init = Structure.from_file(os.path.join(f, 'Au_slab_init.cif'))\n Au_fin = slab_scaling.apply_transformation(Au_init)\n self.assertAlmostEqual(Au_fin.lattice.volume, Au_init.lattice.volume)\n\n # Test on gb relaxation\n f = os.path.join(test_dir, \"grain_boundary\")\n Be_fin = Structure.from_file(os.path.join(f, 'Be_gb_fin.cif'))\n Be_init = Structure.from_file(os.path.join(f, 'Be_gb_init.cif'))\n Zn_init = Structure.from_file(os.path.join(f, 'Zn_gb_init.cif'))\n gb_scaling = ScaleToRelaxedTransformation(Be_init, Be_fin)\n Zn_fin = gb_scaling.apply_transformation(Zn_init)\n self.assertTrue(all([site.species_string == \"Zn\" for site in Zn_fin]))\n self.assertEqual(Be_init.lattice.a < Be_fin.lattice.a, Zn_init.lattice.a < Zn_fin.lattice.a)\n self.assertEqual(Be_init.lattice.b < Be_fin.lattice.b, Zn_init.lattice.b < Zn_fin.lattice.b)\n self.assertEqual(Be_init.lattice.c < Be_fin.lattice.c, Zn_init.lattice.c < Zn_fin.lattice.c)\n Fe_fin = Structure.from_file(os.path.join(f, 'Fe_gb_fin.cif'))\n Fe_init = Structure.from_file(os.path.join(f, 'Fe_gb_init.cif'))\n Mo_init = Structure.from_file(os.path.join(f, 'Mo_gb_init.cif'))\n gb_scaling = ScaleToRelaxedTransformation(Fe_init, Fe_fin)\n Mo_fin = gb_scaling.apply_transformation(Mo_init)\n self.assertTrue(all([site.species_string == \"Mo\" for site in Mo_fin]))\n self.assertEqual(Fe_init.lattice.a < Fe_fin.lattice.a, Mo_init.lattice.a < Mo_fin.lattice.a)\n self.assertEqual(Fe_init.lattice.b < Fe_fin.lattice.b, Mo_init.lattice.b < Mo_fin.lattice.b)\n self.assertEqual(Fe_init.lattice.c < Fe_fin.lattice.c, Mo_init.lattice.c < Mo_fin.lattice.c)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"pymatgen/transformations/tests/test_standard_transformations.py","file_name":"test_standard_transformations.py","file_ext":"py","file_size_in_byte":24866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"540368506","text":"from collections import deque\nimport sys\nimport cv2\nimport numpy as np\nimport time\nimport imutils\nimport math\nfrom matplotlib import pyplot as plt\nfrom imutils.video import FPS\n\n# Functions\nimport HSV_filter as hsv\nimport shape_recognition as shape\nimport triangulation as tri\nimport no_mask\nimport predict_real_time as pre\n#import calibration as calib\n\n# Save image paths\nvideoPath_left = '/home/daniel/Escritorio/MSc AAI/IRP/blender projects/RANGER/Render/videos/cassini004_L.mkv'\nvideoPath_right = '/home/daniel/Escritorio/MSc AAI/IRP/blender projects/RANGER/Render/videos/cassini004_R.mkv'\n\n# Open both cameras\ncap_right = cv2.VideoCapture(videoPath_right) \ncap_left = cv2.VideoCapture(videoPath_left)\nframe_rate = 120 #Camera frame rate (maximum at 120 fps)\n\nB = 5.13535 #Distance between the cameras [m]\nf = 50 #Camera lense's focal length [mm]\nalpha = 39.5978 #Camera field of view in the horizontal plane [degrees]\n\n# initialize the list of tracked points, the frame counter,\n# and the coordinate deltas\npts = deque(maxlen=32)\npts_left = deque(maxlen=32)\n\ncoordinates = deque(maxlen=32)\n\n(dX, dY, dZ) = (0, 0, 0)\nvelocity = 0\ndirection = \"\"\n\n# initialize the object trackers\ntracker_right = cv2.TrackerCSRT_create()\ntracker_left = cv2.TrackerCSRT_create()\n\n# initialize the bounding box coordinates of the object we are going to track\ninitBB_right = None\ninitBB_left = None\n# 
initialize the counter and the FPS throughput estimator\ncounter = -1\ncounter2 = -1\nfps = None\n\ntime.sleep(2.0)\n\n#Initial values\nimg_array_right = []\nimg_array_left = []\n\nwhile(True):\n    counter += 1\n\n    ret_right, frame_right = cap_right.read()\n    ret_left, frame_left = cap_left.read()\n\n################## CALIBRATION #########################################################\n\n    #frame_right, frame_left = calib.undistorted(frame_right, frame_left)\n\n########################################################################################\n\n    # If we cannot catch a frame from either camera, break\n    if not ret_right or not ret_left:\n        break\n\n    else:\n        # check to see if we are already tracking an object\n        if initBB_right is not None and initBB_left is not None: \n\n            # grab the new bounding box coordinates of the object\n            (success_right, box_right) = tracker_right.update(frame_right)\n            # check to see if the tracking was a success\n            if success_right:\n                (x, y, w, h) = [int(v) for v in box_right]\n                cv2.rectangle(frame_right, (x, y), (x + w, y + h),\n                    (0, 255, 0), 2)\n\n                # Turn string to integer\n                x = int(x)\n                y = int(y)\n                w = int(w)\n                h = int(h)\n\n                center_right = (int(x+w/2), int(y+h/2))\n                pts.appendleft(center_right)\n\n            # grab the new bounding box coordinates of the object\n            (success_left, box_left) = tracker_left.update(frame_left)\n            # check to see if the tracking was a success\n            if success_left:\n                (x, y, w, h) = [int(v) for v in box_left]\n                cv2.rectangle(frame_left, (x, y), (x + w, y + h),\n                    (0, 255, 0), 2)\n\n                # Turn string to integer\n                x = int(x)\n                y = int(y)\n                w = int(w)\n                h = int(h)\n\n                center_left = (int(x+w/2), int(y+h/2))\n                pts_left.appendleft(center_left)\n\n            # update the FPS counter\n            fps.update()\n            fps.stop()\n\n            velocity_ms = velocity*fps.fps()\n\n            # show fps\n            cv2.putText(frame_right, \"FPS: {0:.2f}\".format(fps.fps()), (50, frame_right.shape[0] - 50), cv2.FONT_HERSHEY_SIMPLEX,\n                0.7, (0, 255, 0), 2)\n\n            #show velocity\n            cv2.putText(frame_right, \"Velocity: {0:.2f} m/s\".format(velocity_ms), (50, 180), cv2.FONT_HERSHEY_SIMPLEX,\n                0.7, (0, 255, 0), 2)\n\n        else:\n            # APPLYING HSV-FILTER:\n            mask_right = hsv.add_HSV_filter(frame_right)\n            mask_left = hsv.add_HSV_filter(frame_left)\n\n            # APPLYING SHAPE RECOGNITION:\n            center_right, initBB_right = shape.find_bounding_box(frame_right, mask_right, pts)\n            center_left, initBB_left = shape.find_bounding_box(frame_left, mask_left, pts_left)\n\n            # start OpenCV object tracker using the supplied bounding box\n            # coordinates, then start the FPS throughput estimator as well\n            tracker_right.init(frame_right, initBB_right)\n            tracker_left.init(frame_left, initBB_left)\n            fps = FPS().start()\n\n        ################## CALCULATING BALL DEPTH #########################################################\n\n        # If no ball can be caught in one camera show text \"TRACKING LOST\"\n        if center_right is None or center_left is None:\n            cv2.putText(frame_right, \"TRACKING LOST\", (75,50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255),2)\n            cv2.putText(frame_left, \"TRACKING LOST\", (75,50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255),2)\n\n        else:\n            counter2 += 1\n            # Function to calculate depth of object. 
Outputs vector of all depths in case of several balls.\n            # All formulas used to find depth are in the video presentation\n            distance, xCoordinate, yCoordinate, depth, pixels2meters = tri.find_depth(center_right, center_left, frame_right, frame_left, B, f, alpha)\n\n            cv2.putText(frame_right, \"TRACKING\", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,255,0),2)\n            cv2.putText(frame_left, \"TRACKING\", (50,50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,255,0),2)\n            cv2.putText(frame_right, \"Distance: \" + str(round(distance,3))+\" m\", (50,100), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,255,0),2)\n            # cv2.putText(frame_left, \"Distance: \" + str(round(distance,3))+\" m\", (50,100), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,255,0),2)\n            # Multiply the computed value by 205.8 to get the real-life depth in [cm]. The factor was found manually.\n            print(\"Depth: \", distance)\n\n            coordinates.appendleft((xCoordinate, yCoordinate, depth))\n\n            # loop over the set of tracked points\n            for i in np.arange(1, len(pts)):\n                # if either of the tracked points is None, ignore\n                # them\n                if pts[i - 1] is None or pts[i] is None:\n                    continue\n                # check to see if enough points have been accumulated in\n                # the buffer\n                if counter >= 10 and i == 1 and len(pts)==32:\n                    # compute the difference between the x and y\n                    # coordinates and re-initialize the direction\n                    # text variables\n                    dX = coordinates[i][0] - coordinates[-10][0]\n                    dY = coordinates[i][1] - coordinates[-10][1]\n                    dZ = coordinates[i][2] - coordinates[-10][2]\n\n                    velocity = (math.sqrt((dX*dX) + (dY*dY) + (dZ*dZ)))/22\n\n                    (dirX, dirY, dirZ) = (\"\", \"\", \"\")\n                    # ensure there is significant movement in the\n                    # x-direction\n                    if np.abs(dX) > 20:\n                        dirX = \"Right\" if np.sign(dX) == 1 else \"Left\"\n                    # ensure there is significant movement in the\n                    # y-direction\n                    if np.abs(dY) > 20:\n                        dirY = \"Down\" if np.sign(dY) == 1 else \"Up\"\n                    # ensure there is significant movement in the\n                    # z-direction\n                    if np.abs(dZ) > 20:\n                        dirZ = \"Inside\" if np.sign(dZ) == 1 else \"Outside\"\n                    # handle when all three directions are non-empty\n                    if dirX != \"\" and dirY != \"\" and dirZ != \"\":\n                        direction = \"{}-{}-{}\".format(dirX, dirY, dirZ)\n                    # otherwise, only one direction is non-empty\n                    else:\n                        direction = dirX if dirX != \"\" else dirY\n\n                # otherwise, compute the thickness of the line and\n                # draw the connecting lines\n                thickness = int(np.sqrt(32 / float(i + 1)) * 2.5)\n                cv2.line(frame_right, pts[i - 1], pts[i], (0, 0, 255), thickness)\n\n            # Calculate the direction in degrees\n            angleXY = math.degrees(math.atan2(dY, dX))\n            if angleXY < 0:\n                angleXY = 180 + 180 - abs(angleXY)\n\n            angleXZ = math.degrees(math.atan2(dZ, dX))\n            if angleXZ < 0:\n                angleXZ = 180 + 180 - abs(angleXZ)\n\n            # calculate size of the object\n            if counter2 == 0:\n                initial_pixels2meters = pixels2meters\n                width = initBB_right[2] * initial_pixels2meters\n                height = initBB_right[3] * initial_pixels2meters\n\n            # show the coordinates\n            cv2.putText(frame_right, \"X: {0:.2f} m, Y: {1:.2f} m, Z: {2:.2f} m\".format(xCoordinate, yCoordinate, depth), (50, 130), cv2.FONT_HERSHEY_SIMPLEX,\n                0.7, (0, 255, 0), 2)\n\n            # show the movement deltas and the direction of movement on\n            # the frame\n            # cv2.putText(frame_right, \"angleXY: {0:.2f}, angleXZ: {1:.2f}\".format(angleXY, angleXZ), (10, 300), cv2.FONT_HERSHEY_SIMPLEX,\n            #   0.7, (0, 255, 0), 2)\n            # cv2.putText(frame_right, direction, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,\n            #   0.7, (0, 255, 0), 2)\n            cv2.putText(frame_right, \"dx: {0:.2f} m, dy: {1:.2f} m, dz: {2:.2f} m\".format(dX, dY, dZ),\n                (50, 210), cv2.FONT_HERSHEY_SIMPLEX,\n                0.7, (0, 
255, 0), 2)\n            # cv2.putText(frame_right, \"Velocity: {0:.2f}\".format(velocity), (10, 500), cv2.FONT_HERSHEY_SIMPLEX,\n            #   0.7, (0, 255, 0), 2)\n\n            #show size of the object\n            cv2.putText(frame_right, \"Width: {0:.2f} m, Height: {1:.2f} m\".format(width, height), (50, 260), cv2.FONT_HERSHEY_SIMPLEX,\n                0.7, (0, 255, 0), 2)\n\n\n    # keep the frame dimensions in their own names so they do not clobber\n    # the object width/height drawn above\n    frame_h, frame_w, layers = frame_right.shape\n    size_right = (frame_w, frame_h)\n    img_array_right.append(frame_right)\n\n    frame_h, frame_w, layers = frame_left.shape\n    size_left = (frame_w, frame_h)\n    img_array_left.append(frame_left)\n\n\nout_right = cv2.VideoWriter('output_videos/project_R.avi',cv2.VideoWriter_fourcc(*'DIVX'), 15, size_right)\n \nfor i in range(len(img_array_right)):\n    out_right.write(img_array_right[i])\nout_right.release()\n\n\nout_left = cv2.VideoWriter('output_videos/project_L.avi',cv2.VideoWriter_fourcc(*'DIVX'), 15, size_left)\n \nfor i in range(len(img_array_left)):\n    out_left.write(img_array_left[i])\nout_left.release()\n\n# Release and destroy all windows before termination\ncap_right.release()\ncap_left.release()\n\ncv2.destroyAllWindows()","sub_path":"create_output_video.py","file_name":"create_output_video.py","file_ext":"py","file_size_in_byte":10851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"319470398","text":"#!/usr/bin/python\n\nfrom geometry import Point\nfrom group import Group\nimport random\n\nclass Food(Point):\n\tdef __str__(self):\n\t\treturn 'F'\n\n\tdef update(self, *args, **kwargs):\n\t\tpass\n\nclass FoodSource(Group):\n\tdef __init__(self, world, width, center, size):\n\t\tsuper(FoodSource, self).__init__()\n\t\tx,y = center\n\n\t\tmin_x = x - width\n\t\tmax_x = x + width\n\t\tmin_y = y - width\n\t\tmax_y = y + width\n\n\t\ti = 0\n\t\t# track every placed location across the whole loop so duplicates are\n\t\t# actually rejected\n\t\tlocations = []\n\t\twhile i < size:\n\t\t\tx = random.randint(min_x, max_x)\n\t\t\ty = random.randint(min_y, max_y)\n\t\t\tif world.validLocation(x,y) and (x,y) not in locations:\n\t\t\t\tlocations.append((x,y))\n\t\t\t\tself.add(Food(x,y))\n\t\t\t\ti += 1\n","sub_path":"frenzy-bug/food.py","file_name":"food.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"306743868","text":"from flask import Blueprint, abort, request, jsonify\n\nimport logging\nimport re\nimport ipaddress\nfrom datetime import datetime\n\nfrom mr_provisioner import db\nfrom mr_provisioner.models import Interface, Machine, Lease, DiscoveredMAC, MachineEvent\nfrom mr_provisioner.util import MAC_REGEX, DHCP_ARCH_CODES, mac_vendor\nfrom sqlalchemy.exc import DatabaseError\n\nfrom flask import current_app as app\n\nfrom schema import Schema, And, Or, Use, SchemaError\nfrom functools import reduce\n\n\nmod = Blueprint('dhcp', __name__, template_folder='templates')\nlogger = logging.getLogger('dhcp')\n\nlease_schema = Schema({\n    'mac': And(str, lambda s: re.match(MAC_REGEX, s) is not None),\n    'ipv4': Use(ipaddress.IPv4Address),\n    'duration': And(int, lambda i: i >= 0)\n})\n\nseen_schema = Schema({\n    'discover': bool,\n    'mac': And(str, lambda s: re.match(MAC_REGEX, s) is not None),\n    'options': [{\n        'option': And(int, lambda i: i >= 0),\n        'value': Or(int, str)\n    }]\n})\n\nsubnet_schema = Schema({\n    'mac': And(str, lambda s: re.match(MAC_REGEX, s) is not None),\n    'subnets': [{\n        'subnetId': int,\n        'prefix': Use(ipaddress.IPv4Address),\n        'prefixLen': And(int, lambda i: i >= 0 and i <= 32),\n        'pools': [{\n            'poolId': int,\n            'capacity': int,\n            'firstIP': Use(ipaddress.IPv4Address),\n            
'lastIP': Use(ipaddress.IPv4Address),\n }]\n }]\n})\n\n\n@mod.route('/ipv4', methods=['GET'])\ndef index():\n hwaddr = request.args.get('hwaddr')\n if not hwaddr:\n abort(400)\n\n machine = Machine.by_mac(hwaddr)\n if not machine:\n abort(404)\n\n interface = Interface.by_mac(hwaddr)\n if not interface:\n abort(404)\n\n # query param ?hwaddr=\n # response:\n # {\n # \"ipv4\": \"\",\n # \"next-server\": \"\",\n # \"options\": [\n # { \"option\": number, \"value\": \"\" }\n # ]\n # }\n # option 67 is bootfile\n data = {\n 'options': []\n }\n\n if machine.netboot_enabled:\n bootloader_image = machine.bootloader\n bootfile = bootloader_image.filename if bootloader_image else app.config['DHCP_DEFAULT_BOOTFILE']\n\n data['next-server'] = app.config['DHCP_TFTP_PROXY_HOST']\n data['options'].append({'option': 67, 'value': bootfile})\n\n use_static = True if interface.static_ipv4 else False\n\n if interface.reserved_ipv4 and not use_static:\n data['ipv4'] = interface.reserved_ipv4\n\n return jsonify(data), 200\n\n\n@mod.route('/ipv4/lease', methods=['POST'])\ndef lease():\n data = request.get_json(force=True)\n try:\n lease_schema.validate(data)\n except SchemaError as e:\n return str(e), 400\n\n lease = Lease.by_mac(data['mac'])\n if lease:\n lease.ipv4 = data['ipv4']\n lease.last_seen = datetime.utcnow()\n db.session.commit()\n else:\n lease = Lease(mac=data['mac'], ipv4=data['ipv4'])\n db.session.add(lease)\n db.session.commit()\n\n return \"\", 201\n\n\n@mod.route('/ipv4/seen', methods=['POST'])\ndef seen():\n data = request.get_json(force=True)\n try:\n seen_schema.validate(data)\n except SchemaError as e:\n return str(e), 400\n\n interface = Interface.by_mac(data['mac'])\n if interface:\n # Already assigned, don't care - but log an event\n MachineEvent.dhcp_request(interface.machine.id, None, discover=data['discover'])\n return \"\", 200\n\n options = {o['option']: o['value'] for o in data['options']}\n\n info = {}\n\n info['mac_vendor'] = mac_vendor(data['mac'])\n\n if 12 in options:\n # hostname option\n info['hostname'] = options[12]\n\n if 93 in options:\n # processor architecture as per rfc4578\n code = options[93]\n info['arch_code'] = code\n info['arch'] = DHCP_ARCH_CODES.get(code, 'unknown')\n\n discovered_mac = DiscoveredMAC.by_mac(data['mac'])\n if discovered_mac:\n discovered_mac.info = info\n discovered_mac.last_seen = datetime.utcnow()\n db.session.commit()\n else:\n discovered_mac = DiscoveredMAC(mac=data['mac'], info=info)\n db.session.add(discovered_mac)\n db.session.commit()\n\n return \"\", 202\n\n\n@mod.route('/ipv4/subnet', methods=['POST'])\ndef subnet():\n data = request.get_json(force=True)\n try:\n data = subnet_schema.validate(data)\n except SchemaError as e:\n return str(e), 400\n\n hwaddr = data['mac']\n if not hwaddr:\n abort(400)\n\n interface = Interface.by_mac(hwaddr)\n if not interface:\n abort(404)\n\n if not interface.network:\n abort(404)\n\n use_static = True if interface.static_ipv4 else False\n use_reserved = True if not use_static and interface.reserved_ipv4 else False\n\n expected_pool = None\n if use_reserved:\n expected_pool = ipaddress.IPv4Network(interface.network.reserved_net)\n\n logger.info('expected_pool: %s' % expected_pool)\n\n if expected_pool is None:\n return jsonify({'subnetId': None}), 200\n\n response = {}\n\n for subnet in data['subnets']:\n net = ipaddress.IPv4Network((subnet['prefix'], subnet['prefixLen']), strict=False)\n if not net.overlaps(ipaddress.IPv4Network(interface.network.subnet)):\n continue\n\n pool_matches = reduce(lambda 
r, p: r or p['firstIP'] in expected_pool, subnet['pools'], False)\n        if pool_matches:\n            response['subnetId'] = subnet['subnetId']\n            logger.info('matched subnet: %s' % subnet)\n            break\n\n    return jsonify(response), 200\n\n\n@mod.errorhandler(DatabaseError)\ndef handle_db_error(error):\n    db.session.rollback()\n    raise\n","sub_path":"mr_provisioner/dhcp/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":5608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"466240562","text":"def my_sort(numbers):\n    \"\"\"Function to sort a list into odd and even numbers respectively\"\"\"\n    odd_list = []\n    even_list = []\n\n    for num in numbers:\n        if num % 2 == 0:\n            even_list.append(num)\n        else:\n            odd_list.append(num)\n\n    return sorted(odd_list) + sorted(even_list)","sub_path":"qualifiedIOAndela_sortNumbers.py","file_name":"qualifiedIOAndela_sortNumbers.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"222610103","text":"#!/usr/bin/env python\n\nfrom pyspark import SparkConf, SparkContext\n\nconf = SparkConf().setMaster('local').setAppName('App')\nsc = SparkContext(conf=conf)\n\nrdd_list = sc.parallelize([1, 2, 3, 4, 5])\nrdd_dict = sc.parallelize([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('b', 5)])\n\n\ndef f(x, y):\n    print('-', x, y)\n    return 0, x[1] + y[1]\n\n\nprint(rdd_dict.reduce(f))\n# fold works like reduce, except it takes a zero value that is used as the first\n# argument for the first and the last calls to f\nprint(rdd_dict.fold((0, 0), f))\n\n\ndef f2(x, y):\n    print('-', x, y)\n    return x + y\n\n\n# f2 is not called for a key that appears only once\nprint(rdd_dict.reduceByKey(f2).glom().collect())\n# reduceByKeyLocally works like reduceByKey, except the former returns a dict\n# and the latter an RDD\nprint(rdd_dict.reduceByKeyLocally(f2))\n","sub_path":"test_rdd/reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"595190707","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport time\ntry:\n    import json\nexcept ImportError:\n    import simplejson as json\nimport uuid\nimport requests\n\nfrom utils import EnumDict, ChannelException, get_access_token\n\nCHANNEL_URL = 'https://channel.api.duapp.com/rest/2.0/channel/%s'\n\nPUSH_TYPE = EnumDict()\nPUSH_TYPE.USER = 1\nPUSH_TYPE.TAG = 2\nPUSH_TYPE.ALL = 3\n\n\nclass Channel(object):\n    \"\"\"\n    Channel\n    ======\n    Requires two parameters: api_key and api_secret\n    \"\"\"\n    access_token = None\n    expires = time.time()\n\n    def __init__(self, api_key, api_secret):\n        self.api_key = api_key\n        self.api_secret = api_secret\n\n    def refresh_access_token(self):\n        access_token = get_access_token(self.api_key, self.api_secret)\n        self.expires = access_token.expires_in + time.time()\n        self.access_token = access_token.access_token\n\n    def _request(self, channel_id='channel', params=None):\n        # avoid a shared mutable default argument\n        if params is None:\n            params = {}\n        if time.time() + 3600 > self.expires:\n            self.refresh_access_token()\n        url = CHANNEL_URL % channel_id\n\n        params.update({\n            'access_token': self.access_token,\n        })\n\n        r = requests.post(url, data=params)\n        if r.status_code != requests.codes.ok:\n            raise ChannelException(r.status_code, r.text)\n        return EnumDict(json.loads(r.text))\n\n    def query_bindlist(self, user_id, device_type=0, start=0, limit=10,\n                       channel_id='channel'):\n        \"\"\"\n        Query the bindings between devices, apps, users and Baidu Channel\n        \"\"\"\n        params = {\n            'user_id': user_id,\n            'start': start,\n            'limit': limit,\n            'method': 'query_bindlist',\n            'device_type': device_type,\n        }\n        return 
self._request(params=params)\n\n    def push_msg(self, msgs, user_id=None, tag=None, channel_id=None,\n                 device_type=3, message_type=0):\n        \"\"\"\n        Push messages\n        \"\"\"\n        params = {}\n\n        if tag:\n            push_type = PUSH_TYPE.TAG\n            params.update(tag=tag)\n        elif not tag and user_id:\n            push_type = PUSH_TYPE.USER\n            params.update(user_id=user_id)\n        else:\n            push_type = PUSH_TYPE.ALL\n\n        msgs = msgs if isinstance(msgs, (list, tuple)) else [msgs]\n        params.update(\n            push_type=push_type,\n            messages=json.dumps(msgs),\n            msg_keys=json.dumps([uuid.uuid4().hex for x in msgs]),\n            device_type=device_type,\n            message_type=message_type,\n            method='push_msg',\n        )\n        return self._request(channel_id=channel_id, params=params)\n\n    def init_app_ioscert(self, name, description, release_cert, dev_cert):\n        \"\"\"\n        Upload the iOS APNs certificates\n        \"\"\"\n        params = {\n            'name': name,\n            'description': description,\n            'release_cert': release_cert,\n            'dev_cert': dev_cert,\n            'method': 'init_app_ioscert',\n        }\n        return self._request(params=params)\n\n    def update_app_ioscert(self, name=None, description=None,\n                           release_cert=None, dev_cert=None):\n        \"\"\"\n        Update the push certificate details for iOS devices\n        \"\"\"\n        params = {\n            'name': name,\n            'description': description,\n            'release_cert': release_cert,\n            'dev_cert': dev_cert,\n            'method': 'update_app_ioscert',\n        }\n        return self._request(params=params)\n\n    def delete_app_ioscert(self):\n        \"\"\"\n        Delete the push certificates for iOS devices\n        \"\"\"\n        return self._request(params={'method': 'delete_app_ioscert'})\n\n    def query_app_ioscert(self):\n        \"\"\"\n        Query the iOS certificates for this app server.\n        \"\"\"\n        return self._request(params={'method': 'query_app_ioscert'})\n\n    def verify_bind(self, user_id, device_type=None):\n        \"\"\"\n        Check whether a binding between the device, app, user and Channel exists\n        \"\"\"\n        params = {\n            'method': 'verify_bind',\n            'user_id': user_id,\n            'device_type': device_type,\n        }\n        return self._request(params=params)\n\n    def fetch_msg(self, user_id, start=0, limit=10):\n        \"\"\"\n        Fetch offline messages\n        \"\"\"\n        params = {\n            'method': 'fetch_msg',\n            'user_id': user_id,\n            'start': start,\n            'limit': limit,\n        }\n        return self._request(params=params)\n\n    def fetch_msgcount(self, user_id):\n        \"\"\"\n        Query the number of offline messages\n        \"\"\"\n        params = {\n            'method': 'fetch_msgcount',\n            'user_id': user_id,\n        }\n        return self._request(params=params)\n\n    def delete_msg(self, user_id, msg_ids):\n        \"\"\"\n        Delete offline messages\n        \"\"\"\n        msg_ids = msg_ids if isinstance(msg_ids, (list, tuple)) else [msg_ids]\n        params = {\n            'method': 'delete_msg',\n            'msg_ids': json.dumps(msg_ids),\n            'user_id': user_id,\n        }\n        return self._request(params=params)\n\n    def set_tag(self, tag, user_id=None):\n        \"\"\"\n        Set a user tag from the server side\n        \"\"\"\n        params = {\n            'method': 'set_tag',\n            'user_id': user_id,\n            'tag': tag,\n        }\n        return self._request(params=params)\n\n    def fetch_tag(self, tag=None, start=0, limit=10):\n        \"\"\"\n        Query app tags from the app server\n        \"\"\"\n        params = {\n            'method': 'fetch_tag',\n            'tag': tag,\n            'start': start,\n            'limit': limit,\n        }\n        return self._request(params=params)\n\n    def delete_tag(self, tag, user_id=None):\n        \"\"\"\n        Delete a user tag from the server side\n        \"\"\"\n        params = {\n            'method': 'delete_tag',\n            'tag': tag,\n            'user_id': user_id,\n        }\n        return self._request(params=params)\n\n    def query_user_tags(self, user_id):\n        \"\"\"\n        Query the list of tags a user belongs to\n        \"\"\"\n        params = {\n            'method': 'query_user_tags',\n            'user_id': user_id,\n        }\n        return self._request(params=params)\n\n    def query_device_type(self):\n        \"\"\"\n        Query the device type by channel_id\n        \"\"\"\n        params = {\n            'method': 'query_device_type',\n        }\n        return 
self._request(params=params)\n","sub_path":"src/bdchannel/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"216373080","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n\t# The destroy routes capture the course id\n\turl(r'^process/destroy/(?P<id>\\d+)/delete$', views.delete, name='courses_delete'),\n\turl(r'^process/destroy/(?P<id>\\d+)$', views.destroy, name='courses_destroy'),\n\turl(r'^process$', views.process, name='courses_process'),\n\turl(r'^$', views.index, name='courses_index'),\n]\n","sub_path":"apps/course_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"584500758","text":"from . import models\nfrom . import schema\n\nimport re\nimport magic\nimport mimetypes\nimport boto3\nfrom botocore.client import Config\nfrom mongoengine import connect\nfrom pydub import AudioSegment\nimport io\nimport hashlib\nfrom base64 import urlsafe_b64encode\n\n#MONGO_URI = f'mongodb://{MONGO_USERNAME}:{MONGO_PASSWORD}@{MONGO_IP}/{MONGO_DB}?authSource={MONGO_AUTH_DB}'\n\nconfig = None\n\n'''\nDefaults\nModified when init_app() called\n'''\nREGION = 'sfo2'\nSTATIC_FILE_BASE_URL = f'https://{REGION}.digitaloceanspaces.com'\nsession = boto3.session.Session()\nclient = session.client('s3',\n                        region_name=REGION,\n                        endpoint_url=STATIC_FILE_BASE_URL,\n                        aws_access_key_id='CUT4SK6OYILHJJV3B5LD',\n                        aws_secret_access_key='yyIXed9h9kn6n9V4c/b64+ZRHtP8baR89lp3dqvOY34')\n\nBUCKET = 'ultracast-files'\nFILE_ACCESS = 'public-read'\n\ndef init_app(app):\n    '''\n    Init based off the app's config\n    '''\n    # declare the module-level names, otherwise these assignments would only\n    # create locals and the defaults above would never be replaced\n    global config, REGION, STATIC_FILE_BASE_URL, client, BUCKET, FILE_ACCESS\n    config = app.config\n    REGION = app.config[\"S3\"][\"REGION\"]\n    STATIC_FILE_BASE_URL = f'https://{REGION}.digitaloceanspaces.com'\n    client = session.client('s3',\n            region_name=REGION,\n            endpoint_url=STATIC_FILE_BASE_URL,\n            aws_access_key_id=app.config[\"S3\"][\"AWS_ACCESS_KEY\"],\n            aws_secret_access_key=app.config[\"S3\"][\"AWS_SECRET_ACCESS_KEY\"])\n\n    BUCKET = app.config[\"S3\"][\"BUCKET\"]\n    FILE_ACCESS = app.config[\"S3\"][\"FILE_ACCESS\"]\n\ndef connect_mongo(app_config):\n    mongo_uri = \"mongodb://{u}:{p}@{ip}/{db}?authSource={auth_db}\".format(\n        u=app_config[\"MONGO_USERNAME\"], p=app_config[\"MONGO_PASSWORD\"], \n        ip=app_config[\"MONGO_IP\"], db=app_config[\"MONGO_DB\"], auth_db=app_config[\"MONGO_AUTH_DB\"])\n    connect(host=mongo_uri)\n\n# Digital Ocean Space (Static-Files)\n\n\n\nclass IllegalMimeException(Exception):\n    pass\n\n\ndef get_bucket_url():\n    return re.sub(r\"^https://\", f\"https://{BUCKET}.\", STATIC_FILE_BASE_URL)\n\n\ndef get_file_url(filename):\n    return get_bucket_url() + f\"/{filename}\"\n\n\ndef get_key_from_url(url):\n    return re.sub(get_bucket_url() + \"/\", \"\", url)\n\n\ndef get_key_from_binary_data(data, ext=\"\"):\n    return urlsafe_b64encode(hashlib.sha256(data).digest()).decode('UTF-8') + ext\n\n\ndef check_status(resp, ok_statuses, op):\n    if resp['ResponseMetadata']['HTTPStatusCode'] not in ok_statuses:\n        raise Exception(f\"Error for operation [{op}] - Response: {resp}\")\n\n\ndef file_exists(key):\n    try:\n        client.head_object(Bucket=BUCKET, Key=key)\n        return True\n    except:\n        return False\n\n\ndef url_exists(url):\n    return file_exists(get_key_from_url(url))\n\n\ndef get_key(data, key=None, ext=\"\"):\n    if key is None:\n        return get_key_from_binary_data(data, ext)\n    else:\n        return key\n\n\ndef check_mime(data, 
valid_mimes):\n try:\n mime_type = magic.from_buffer(data, mime=True)\n except:\n raise IllegalMimeException(f\"Could not interpret MIME type of payload\")\n\n if mime_type not in valid_mimes:\n raise IllegalMimeException(f\"MIME type {mime_type} not allowed\")\n return mime_type\n\n\ndef add_file(data, key=None, valid_mimes=[], override=False):\n mime_type = check_mime(data, valid_mimes)\n extension = mimetypes.guess_extension(mime_type)\n key = get_key(data, key, extension)\n\n if not override and file_exists(key):\n return get_file_url(key)\n\n resp = client.put_object(\n Body=data,\n Bucket=BUCKET,\n Key=key,\n ACL=FILE_ACCESS,\n ContentType=mime_type)\n check_status(resp, [200], 'Add File')\n return get_file_url(key)\n\n\ndef remove_file(url, key=None):\n if key is None:\n resp = client.delete_object(Bucket=BUCKET, Key=get_key_from_url(url))\n else:\n resp = client.delete_object(Bucket=BUCKET, Key=key)\n check_status(resp, [200, 204], 'Remove File')\n\n\ndef update_file(old_url, data, new_key=None, valid_mimes=[]):\n if url_exists(old_url):\n remove_file(old_url)\n return add_file(data, new_key, valid_mimes)\n\ndef audio_file_duration_secs(data):\n try:\n audio = AudioSegment.from_file(io.BytesIO(data), format=\"mp3\")\n return int(round(audio.duration_seconds))\n except:\n return -1\n","sub_path":"backend/webserver/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"431829493","text":"import functools\nimport os\nimport shutil\nfrom pathlib import Path\nfrom tempfile import mkstemp\n\nimport pandas as pd\n\nfrom powersimdata.utility import server_setup\n\n\ndef verify_hash(func):\n \"\"\"Utility function which verifies the sha1sum of the file before writing\n it on the server. 
Operates on methods that return an updated scenario or\n    execute list.\n    \"\"\"\n\n    @functools.wraps(func)\n    def wrapper(self, *args, **kwargs):\n        checksum = self.data_access.checksum(self._FILE_NAME)\n        table = func(self, *args, **kwargs)\n        self.commit(table, checksum)\n        return table\n\n    return wrapper\n\n\nclass CsvStore:\n    \"\"\"Base class for common functionality used to manage scenario and execute\n    list stored as csv files on the server\n\n    :param powersimdata.data_access.data_access.DataAccess: data access object\n    \"\"\"\n\n    def __init__(self, data_access):\n        \"\"\"Constructor\"\"\"\n        self.data_access = data_access\n\n    def get_table(self):\n        \"\"\"Read the given file from the server, falling back to local copy if\n        unable to connect.\n\n        :return: (*pandas.DataFrame*) -- the specified table as a data frame.\n        \"\"\"\n        filename = self._FILE_NAME\n        local_path = Path(server_setup.LOCAL_DIR, filename)\n\n        try:\n            self.data_access.copy_from(filename)\n        except:  # noqa\n            print(f\"Failed to download {filename} from server\")\n            print(\"Falling back to local cache...\")\n\n        if local_path.is_file():\n            return self._parse_csv(local_path)\n        else:\n            raise FileNotFoundError(f\"{filename} does not exist locally.\")\n\n    def _parse_csv(self, file_object):\n        \"\"\"Read file from disk into data frame\n\n        :param str, path object or file-like object file_object: a reference to\n        the csv file\n        :return: (*pandas.DataFrame*) -- the specified file as a data frame.\n        \"\"\"\n        table = pd.read_csv(file_object)\n        table.set_index(\"id\", inplace=True)\n        table.fillna(\"\", inplace=True)\n        return table.astype(str)\n\n    def commit(self, table, checksum):\n        \"\"\"Save to local directory and upload if needed\n\n        :param pandas.DataFrame table: the data frame to save\n        :param str checksum: the checksum prior to download\n        \"\"\"\n        tmp_file, tmp_path = mkstemp(dir=server_setup.LOCAL_DIR)\n        table.to_csv(tmp_path)\n        shutil.copy(tmp_path, os.path.join(server_setup.LOCAL_DIR, self._FILE_NAME))\n        os.close(tmp_file)\n        tmp_name = os.path.basename(tmp_path)\n        self.data_access.push(tmp_name, checksum, change_name_to=self._FILE_NAME)\n        if os.path.exists(tmp_path):  # only required if data_access is LocalDataAccess\n            os.remove(tmp_path)\n","sub_path":"powersimdata/data_access/csv_store.py","file_name":"csv_store.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"25844603","text":"import numpy as np\nfrom scipy import ndimage\n\nx, y = np.indices((100, 100))\nsig = np.sin(2*np.pi*x/50.)*np.sin(2*np.pi*y/50.)*(1+x*y/50.**2)**2\nmask = sig > 1\nlabels, nb = ndimage.label(mask)\nnb\nareas = ndimage.sum(mask, labels, np.arange(1, labels.max()+1))\nareas\nmaxima = ndimage.maximum(sig, labels, np.arange(1, labels.max()+1))\nmaxima\n\n","sub_path":"course_in_french/source/script_sets_measure.py","file_name":"script_sets_measure.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"29896578","text":"import random, time, pygame, sys\r\nfrom pygame.locals import *\r\nfrom random import randint\r\n\r\n#Defining my FPS and everything that relies on it\r\nFPS = float(60)\r\nMOVESIDEWAYSRATE = 14/FPS\r\nMOVEDOWNRATE = 14/FPS\r\nLOCKRATE = 30/FPS\r\nWAITRATE = 50/FPS\r\nLINECLEARRATE = 41/FPS\r\nG = 3/FPS\r\n\r\n#Defining the dimensions of my screen and board\r\nWIDTH = 720\r\nHEIGHT = 480\r\nBSIZE = 20\r\nBWIDTH = 10\r\nBHEIGHT = 20\r\nP1XMARGIN = int((WIDTH - BWIDTH * BSIZE) / 10)\r\nP2XMARGIN = int((WIDTH 
- BWIDTH * BSIZE) / 4 * 3)\r\nTOPMARGIN = HEIGHT - (BHEIGHT * BSIZE) - 5\r\n\r\n#Defining all of the colours used, as RGB tuples\r\nWHITE = (255, 255, 255)\r\nGRAY = (185, 185, 185)\r\nBLACK = (0, 0, 0)\r\nAQUA = (0, 240, 240)\r\nLAQUA = (179, 251, 251)\r\nMAQUA = (0, 216, 216)\r\nDAQUA = (0, 120, 120)\r\nGREEN = (0, 240, 0)\r\nLGREEN = (179, 251, 179)\r\nMGREEN = (0, 216, 0)\r\nDGREEN = (0, 120, 0)\r\nBLUE = (0, 0, 240)\r\nLBLUE = (179, 179, 251)\r\nMBLUE = (0, 0, 216)\r\nDBLUE = (0, 0, 120)\r\nPURPLE = (160, 0, 240)\r\nLPURPLE = (227, 179, 251)\r\nMPURPLE = (144, 0, 216)\r\nDPURPLE = (80, 0, 120)\r\nORANGE = (240, 160, 0)\r\nLORANGE = (251, 227, 179)\r\nMORANGE = (216, 144, 0)\r\nDORANGE = (120, 80, 0)\r\nRED = (240, 0, 0)\r\nLRED = (251, 179, 179)\r\nMRED = (216, 0, 0)\r\nDRED = (120, 0, 0)\r\nYELLOW = (240, 240, 0)\r\nLYELLOW = (251, 251, 179)\r\nMYELLOW = (216, 216, 0)\r\nDYELLOW = (120, 120, 0)\r\n\r\n#Defining colour groups\r\nCOLOURS = [AQUA, GREEN, BLUE, PURPLE, ORANGE, RED, YELLOW]\r\nLCOLOURS = [LAQUA, LGREEN, LBLUE, LPURPLE, LORANGE, LRED, LYELLOW]\r\nMCOLOURS = [MAQUA, MGREEN, MBLUE, MPURPLE, MORANGE, MRED, MYELLOW]\r\nDCOLOURS = [DAQUA, DGREEN, DBLUE, DPURPLE, DORANGE, DRED, DYELLOW]\r\n\r\n#Making sure the colour groups are the same length\r\nassert len(COLOURS) == len(LCOLOURS) and len(DCOLOURS) == \\\r\n       len(MCOLOURS) and len(COLOURS) == len(DCOLOURS)\r\n\r\n#I make a group of groups, so I can randomize my background colour\r\nCOL = [COLOURS, LCOLOURS, MCOLOURS, DCOLOURS]\r\n\r\n#I define my tetrominoes; the blocks being used\r\nTWIDTH = 5\r\nTHEIGHT = 5\r\nBLANK = \"X\"\r\n\r\nT_BLOCK = [[\"XXXXX\",\r\n            \"XXXXX\",\r\n            \"XOOOX\",\r\n            \"XXOXX\",\r\n            \"XXXXX\"],\r\n           [\"XXXXX\",\r\n            \"XXOXX\",\r\n            \"XOOXX\",\r\n            \"XXOXX\",\r\n            \"XXXXX\"],\r\n           [\"XXXXX\",\r\n            \"XXOXX\",\r\n            \"XOOOX\",\r\n            \"XXXXX\",\r\n            \"XXXXX\"],\r\n           [\"XXXXX\",\r\n            \"XXOXX\",\r\n            \"XXOOX\",\r\n            \"XXOXX\",\r\n            \"XXXXX\"]]\r\n\r\nZ_BLOCK = [[\"XXXXX\",\r\n            \"XXXXX\",\r\n            \"XOOXX\",\r\n            \"XXOOX\",\r\n            \"XXXXX\"],\r\n           [\"XXXXX\",\r\n            \"XXOXX\",\r\n            \"XOOXX\",\r\n            \"XOXXX\",\r\n            \"XXXXX\"]]\r\n\r\nJ_BLOCK = [[\"XXXXX\",\r\n            \"XXXXX\",\r\n            \"XOOOX\",\r\n            \"XXXOX\",\r\n            \"XXXXX\"],\r\n           [\"XXXXX\",\r\n            \"XXOXX\",\r\n            \"XXOXX\",\r\n            \"XOOXX\",\r\n            \"XXXXX\"],\r\n           [\"XXXXX\",\r\n            \"XOXXX\",\r\n            \"XOOOX\",\r\n            \"XXXXX\",\r\n            \"XXXXX\"],\r\n           [\"XXXXX\",\r\n            \"XXOOX\",\r\n            \"XXOXX\",\r\n            \"XXOXX\",\r\n            \"XXXXX\"]]\r\n\r\nS_BLOCK = [[\"XXXXX\",\r\n            \"XXXXX\",\r\n            \"XXOOX\",\r\n            \"XOOXX\",\r\n            \"XXXXX\"],\r\n           [\"XXXXX\",\r\n            \"XXOXX\",\r\n            \"XXOOX\",\r\n            \"XXXOX\",\r\n            \"XXXXX\"]]\r\n\r\nL_BLOCK = [[\"XXXXX\",\r\n            \"XXXXX\",\r\n            \"XOOOX\",\r\n            \"XOXXX\",\r\n            \"XXXXX\"],\r\n           [\"XXXXX\",\r\n            \"XOOXX\",\r\n            \"XXOXX\",\r\n            \"XXOXX\",\r\n            \"XXXXX\"],\r\n           [\"XXXXX\",\r\n            \"XXXOX\",\r\n            \"XOOOX\",\r\n            \"XXXXX\",\r\n            \"XXXXX\"],\r\n           [\"XXXXX\",\r\n            \"XXOXX\",\r\n            \"XXOXX\",\r\n            \"XXOOX\",\r\n            \"XXXXX\"]]\r\n\r\nI_BLOCK = [[\"XXXXX\",\r\n            \"XXXXX\",\r\n            \"OOOOX\",\r\n            \"XXXXX\",\r\n            \"XXXXX\"],\r\n           [\"XXOXX\",\r\n            \"XXOXX\",\r\n            \"XXOXX\",\r\n            \"XXOXX\",\r\n            \"XXXXX\"]]\r\n\r\nO_BLOCK = [[\"XXXXX\",\r\n            \"XXXXX\",\r\n            \"XOOXX\",\r\n            \"XOOXX\",\r\n            \"XXXXX\"]]\r\n\r\nPIECES = {0: T_BLOCK,\r\n          1: Z_BLOCK,\r\n          2: J_BLOCK,\r\n          3: S_BLOCK,\r\n          4: L_BLOCK,\r\n          5: I_BLOCK,\r\n          6: O_BLOCK,\r\n          }\r\n\r\n#Main method\r\ndef main():\r\n    #Some global variables; font sizes and the screen\r\n    global SCREEN, SFONT, MFONT, LFONT, BCOLOUR\r\n    pygame.init()\r\n    #I define those global 
variables\r\n SCREEN = pygame.display.set_mode((WIDTH, HEIGHT))\r\n SFONT = pygame.font.SysFont(\"verdana\", 18)\r\n MFONT = pygame.font.SysFont(\"times\", 35)\r\n LFONT = pygame.font.SysFont(\"verdana\", 100)\r\n GHOST = COL[randint(0,3)]\r\n BCOLOUR = GHOST[randint(0,6)]\r\n pygame.display.set_caption(\"Tetris\")\r\n #I create a list of the last 4 used blocks\r\n p1List = [7, 7, 7, 7]\r\n p2List = [7, 7, 7, 7]\r\n p1InitialPiece = getNewPiece(p1List)\r\n p2InitialPiece = getNewPiece(p2List)\r\n p1GrandMaster = False\r\n p2GrandMaster = False\r\n #I add all of the graphical pieces shown at boot-up\r\n addNewPiece(p1InitialPiece, 1)\r\n addNewPiece(p2InitialPiece, 2)\r\n addBoard(getNewBoard(), 1)\r\n addBoard(getNewBoard(), 2)\r\n addStatus(0, 0, 0, 1)\r\n addStatus(0, 0, 0, 2)\r\n addRank(0, 1)\r\n addRank(0, 2)\r\n addTextScreen(\"Tetris\")\r\n #I start the game loop; I have three different endings for a game depending on your skill\r\n while True:\r\n p1Level, p2Level, p1InitialPiece, p2InitialPiece, p1GrandMaster, p2GrandMaster \\\r\n = runGame(p1InitialPiece, p2InitialPiece, p1List, p2List, p1GrandMaster, p2GrandMaster)\r\n if p1GrandMaster and p2GrandMaster:\r\n addTextScreen(\"DOUBLE GRAND MASTER!!!\")\r\n elif p1Level > 999 and not p1GrandMaster and p2GrandMaster:\r\n addTextScreen(\"P2 GRAND MASTER!!! P1 WIN!\")\r\n elif p2Level > 999 and p1GrandMaster and not p2GrandMaster:\r\n addTextScreen(\"P1 GRAND MASTER!!! P2 WIN!\")\r\n elif p1Level > 999 and p2Level > 999:\r\n addTextScreen(\"DOUBLE WIN!!!\")\r\n elif p1Level > p2Level:\r\n addTextScreen(\"P1 WINS!\")\r\n elif p1Level < p2Level:\r\n addTextScreen(\"P2 WINS!\")\r\n else:\r\n addTextScreen(\"YOU TIED!!!\")\r\n\r\n#runGame() starts the game and runs the game\r\n#Takes an initial piece, the used block list, and the grand master check\r\ndef runGame(p1InitialPiece, p2InitialPiece, p1List, p2List, p1GrandMaster, p2GrandMaster):\r\n #Set-up variables; the background image, a bunch of check times \\\r\n #boolean check variables, scoring integers, etc.\r\n bimage = randint(0,20)\r\n p1Board = getNewBoard()\r\n p2Board = getNewBoard()\r\n startTime = time.time()\r\n p1LastMoveDownTime = time.time()\r\n p2LastMoveDownTime = time.time()\r\n p1LastMoveSidewaysTime = time.time()\r\n p2LastMoveSidewaysTime = time.time()\r\n p1LastFallTime = time.time()\r\n p2LastFallTime = time.time()\r\n p1LastLockTime = time.time()\r\n p2LastLockTime = time.time()\r\n p1GhostFallTime = time.time()\r\n p2GhostFallTime = time.time()\r\n p1WaitTime = time.time()\r\n p2WaitTime = time.time()\r\n p1SoftTime = time.time()\r\n p2SoftTime = time.time()\r\n p1MovingDown = False\r\n p2MovingDown = False\r\n p1MovingLeft = False\r\n p2MovingLeft = False\r\n p1MovingRight = False\r\n p2MovingRight = False\r\n p1InstantDrop = False\r\n p2InstantDrop = False\r\n p1GhostLeft = False\r\n p2GhostLeft = False\r\n p1GhostRight = False\r\n p2GhostRight = False\r\n p1Lock = False\r\n p2Lock = False\r\n p1gm1 = False\r\n p2gm1 = False\r\n p1gm2 = False\r\n p2gm2 = False\r\n p1gm3 = False\r\n p2gm3 = False\r\n p1Play = True\r\n p2Play = True\r\n p1LockY = 0\r\n p2LockY = 0\r\n p1Level = 1\r\n p2Level = 1\r\n p1DropNum = 1\r\n p2DropNum = 1\r\n p1Combo = 1\r\n p2Combo = 1\r\n p1Bravo = 1\r\n p2Bravo = 1\r\n p1Soft = 0\r\n p2Soft = 0\r\n p1Score = 0\r\n p2Score = 0\r\n p1RemovedLines = 0\r\n p2RemovedLines = 0\r\n p1BaseLevel, p1FallRate, p1DropNum = getLevel(p1Level)\r\n p2BaseLevel, p2FallRate, p2DropNum = getLevel(p2Level)\r\n #I plop the first piece down\r\n 
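#A worked example of the scoring formula applied when a piece lands in the loop below (the numbers\r\n    #here are illustrative, not taken from a real game): with level=42, cleared lines=2, soft-drop\r\n    #frames=10, combo=1 and bravo=1, the award is ((42 + 2)/4 + 10) * 2 * 1 * 1 = 42 points.\r\n    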
p1FallingPiece = p1InitialPiece\r\n    p2FallingPiece = p2InitialPiece\r\n    p1NewPiece = getNewPiece(p1List)\r\n    p2NewPiece = getNewPiece(p2List)\r\n\r\n    #The game loop\r\n    while True:\r\n        #Once a piece has landed I calculate the score, check the gm requirements, and reset some variables\r\n        if p1FallingPiece is None and p1Play:\r\n            p1Score += ((p1Level + p1RemovedLines)/4 + p1Soft) * p1RemovedLines * p1Combo * p1Bravo\r\n            if p1BaseLevel == 400 and p1Score >= 12000 and (time.time() - startTime <= 255) and not p1gm1:\r\n                p1gm1 = True\r\n            if p1BaseLevel == 600 and p1Score >= 40000 and (time.time() - startTime <= 420) and not p1gm2:\r\n                p1gm2 = True\r\n            if p1BaseLevel == 1100 and p1Score >= 126000 and (time.time() - startTime <= 810) and not p1gm3:\r\n                p1gm3 = True\r\n            if p1gm1 and p1gm2 and p1gm3:\r\n                p1GrandMaster = True\r\n            p1WaitTime = time.time()\r\n            p1LastMoveSidewaysTime = time.time()\r\n            p1LastFallTime = time.time()\r\n            p1GhostFallTime = time.time()\r\n            p1SoftTime = time.time()\r\n            p1Soft = 0\r\n            #I create a new block, put it at the top\r\n            p1FallingPiece = p1NewPiece\r\n            p1NewPiece = getNewPiece(p1List)\r\n            #Due to the fall delay I implemented, I move the piece horizontally if it should have beforehand\r\n            if p1MovingRight and isTruePosition(p1Board, p1FallingPiece, adjX=1):\r\n                p1FallingPiece[\"x\"] += 1\r\n            if p1MovingLeft and isTruePosition(p1Board, p1FallingPiece, adjX=-1):\r\n                p1FallingPiece[\"x\"] -= 1\r\n            #If the game is over, I switch a variable to show they aren't playing\r\n            if not isTruePosition(p1Board, p1FallingPiece) or p1Level > 999:\r\n                p1Play = False\r\n            #This makes sure a player can't reach a new hundred bracket of levels by only plopping a block down\r\n            if p1Level % 100 != 99:\r\n                p1Level += 1\r\n\r\n        #Once a piece has landed I calculate the score, check the gm requirements, and reset some variables\r\n        if p2FallingPiece is None and p2Play:\r\n            p2Score += ((p2Level + p2RemovedLines)/4 + p2Soft) * p2RemovedLines * p2Combo * p2Bravo\r\n            if p2BaseLevel == 400 and p2Score >= 12000 and (time.time() - startTime <= 255) and not p2gm1:\r\n                p2gm1 = True\r\n            if p2BaseLevel == 600 and p2Score >= 40000 and (time.time() - startTime <= 420) and not p2gm2:\r\n                p2gm2 = True\r\n            if p2BaseLevel == 1100 and p2Score >= 126000 and (time.time() - startTime <= 810) and not p2gm3:\r\n                p2gm3 = True\r\n            if p2gm1 and p2gm2 and p2gm3:\r\n                p2GrandMaster = True\r\n            p2WaitTime = time.time()\r\n            p2LastMoveSidewaysTime = time.time()\r\n            p2LastFallTime = time.time()\r\n            p2GhostFallTime = time.time()\r\n            p2SoftTime = time.time()\r\n            p2Soft = 0\r\n            #I create a new block, put it at the top\r\n            p2FallingPiece = p2NewPiece\r\n            p2NewPiece = getNewPiece(p2List)\r\n            #Due to the fall delay I implemented, I move the piece horizontally if it should have beforehand\r\n            if p2MovingRight and isTruePosition(p2Board, p2FallingPiece, adjX=1):\r\n                p2FallingPiece[\"x\"] += 1\r\n            if p2MovingLeft and isTruePosition(p2Board, p2FallingPiece, adjX=-1):\r\n                p2FallingPiece[\"x\"] -= 1\r\n            #If the game is over, I switch a variable to show they aren't playing\r\n            if not isTruePosition(p2Board, p2FallingPiece) or p2Level > 999:\r\n                p2Play = False\r\n            #This makes sure a player can't reach a new hundred bracket of levels by only plopping a block down\r\n            if p2Level % 100 != 99:\r\n                p2Level += 1\r\n\r\n        #Checks to see both people are finished playing and returns if they are\r\n        if not p1Play and not p2Play:\r\n            return p1Level, p2Level, p1InitialPiece, p2InitialPiece, p1GrandMaster, 
p2GrandMaster\r\n\r\n        #Drawing all of the screen's elements\r\n        BACKGROUND = background(\"images/\" + str(bimage) + \".jpg\", [0,0])\r\n        SCREEN.fill([255, 255, 255])\r\n        SCREEN.blit(BACKGROUND.image, BACKGROUND.rect)\r\n        addBoard(p1Board, 1)\r\n        addBoard(p2Board, 2)\r\n        addStatus(p1Score, p1Level, p1BaseLevel, 1)\r\n        addStatus(p2Score, p2Level, p2BaseLevel, 2)\r\n        addNewPiece(p1NewPiece, 1)\r\n        addNewPiece(p2NewPiece, 2)\r\n        addRank(p1Score, 1)\r\n        addRank(p2Score, 2)\r\n        if p1FallingPiece is not None:\r\n            addPiece(p1FallingPiece, 1)\r\n        if p2FallingPiece is not None:\r\n            addPiece(p2FallingPiece, 2)\r\n        pygame.display.update()\r\n\r\n        checkQuit()\r\n        #Loop for handling events\r\n        for event in pygame.event.get():\r\n            if event.type == KEYUP:\r\n                #Pauses game until a button is pressed\r\n                if (event.key == K_p):\r\n                    addTextScreen(\"Paused\")\r\n                    p1LastFallTime = time.time()\r\n                    p2LastFallTime = time.time()\r\n                    p1LastMoveDownTime = time.time()\r\n                    p2LastMoveDownTime = time.time()\r\n                    p1LastMoveSidewaysTime = time.time()\r\n                    p2LastMoveSidewaysTime = time.time()\r\n                #Stops moving the block left and realigns some variables\r\n                elif (event.key == K_a):\r\n                    p1MovingLeft = False\r\n                    p1GhostLeft = False\r\n                    p1LastFallTime = p1GhostFallTime\r\n                    if p1GhostRight:\r\n                        p1MovingRight = True\r\n                        p1LastMoveSidewaysTime = time.time()\r\n                    if p1MovingRight and isTruePosition(p1Board, p1FallingPiece, adjX=1):\r\n                        p1FallingPiece[\"x\"] += 1\r\n                elif (event.key == K_LEFT):\r\n                    p2MovingLeft = False\r\n                    p2GhostLeft = False\r\n                    p2LastFallTime = p2GhostFallTime\r\n                    if p2GhostRight:\r\n                        p2MovingRight = True\r\n                        p2LastMoveSidewaysTime = time.time()\r\n                    if p2MovingRight and isTruePosition(p2Board, p2FallingPiece, adjX=1):\r\n                        p2FallingPiece[\"x\"] += 1\r\n                #Stops moving the block right and realigns some variables\r\n                elif (event.key == K_d):\r\n                    p1MovingRight = False\r\n                    p1GhostRight = False\r\n                    p1LastFallTime = p1GhostFallTime\r\n                    if p1GhostLeft:\r\n                        p1MovingLeft = True\r\n                        p1LastMoveSidewaysTime = time.time()\r\n                    if p1MovingLeft and isTruePosition(p1Board, p1FallingPiece, adjX=-1):\r\n                        p1FallingPiece[\"x\"] -= 1\r\n                elif (event.key == K_RIGHT):\r\n                    p2MovingRight = False\r\n                    p2GhostRight = False\r\n                    p2LastFallTime = p2GhostFallTime\r\n                    if p2GhostLeft:\r\n                        p2MovingLeft = True\r\n                        p2LastMoveSidewaysTime = time.time()\r\n                    if p2MovingLeft and isTruePosition(p2Board, p2FallingPiece, adjX=-1):\r\n                        p2FallingPiece[\"x\"] -= 1\r\n\r\n                #Declares tracking variables false\r\n                elif (event.key == K_s):\r\n                    p1MovingDown = False\r\n                elif (event.key == K_DOWN):\r\n                    p2MovingDown = False\r\n                elif (event.key == K_w):\r\n                    p1InstantDrop = False\r\n                elif (event.key == K_UP):\r\n                    p2InstantDrop = False\r\n\r\n            if time.time() - p1WaitTime <= WAITRATE:\r\n                if event.type == KEYDOWN:\r\n                    #For the freeze at the top; sets up the horizontal variables ahead of time\r\n                    if (event.key == K_a) and isTruePosition(p1Board, p1FallingPiece, adjX=-1):\r\n                        p1MovingLeft = True\r\n                        p1MovingRight = False\r\n                        p1GhostLeft = True\r\n                    elif (event.key == K_LEFT) and isTruePosition(p2Board, p2FallingPiece, adjX=-1):\r\n                        p2MovingLeft = True\r\n                        p2MovingRight = False\r\n                        p2GhostLeft = True\r\n\r\n            if time.time() - p2WaitTime <= WAITRATE:\r\n                if event.type == KEYDOWN:\r\n                    if (event.key == K_d) and isTruePosition(p1Board, p1FallingPiece, adjX=1):\r\n                        p1MovingRight = True\r\n                        p1MovingLeft = False\r\n                        p1GhostRight = True\r\n                    elif (event.key == K_RIGHT) and isTruePosition(p2Board, p2FallingPiece, adjX=1):\r\n                        p2MovingRight = True\r\n                        p2MovingLeft = False\r\n                        p2GhostRight = 
True\r\n \r\n if time.time() - p1WaitTime > WAITRATE and p1Play:\r\n if event.type == KEYDOWN:\r\n #Moves the block left if valid\r\n if (event.key == K_a) and isTruePosition(p1Board, p1FallingPiece, adjX=-1):\r\n p1FallingPiece[\"x\"] -= 1\r\n p1MovingLeft = True\r\n p1MovingRight = False\r\n p1GhostLeft = True\r\n p1LastMoveSidewaysTime = time.time()\r\n #Moves the block right if valid\r\n elif (event.key == K_d) and isTruePosition(p1Board, p1FallingPiece, adjX=1):\r\n p1FallingPiece[\"x\"] += 1\r\n p1MovingRight = True\r\n p1MovingLeft = False\r\n p1GhostRight = True\r\n p1LastMoveSidewaysTime = time.time()\r\n #Rotating the block CW if valid; checks to see if it can be pushed one space to the left or right if necessary\r\n elif (event.key == K_c or event.key == K_b):\r\n p1FallingPiece[\"rotation\"] = (p1FallingPiece[\"rotation\"] + 1) % len(PIECES[p1FallingPiece[\"shape\"]])\r\n if not isTruePosition(p1Board, p1FallingPiece):\r\n if isTruePosition(p1Board, p1FallingPiece, adjX=1):\r\n p1FallingPiece[\"x\"] += 1\r\n if isTruePosition(p1Board, p1FallingPiece, adjX=-1):\r\n p1FallingPiece[\"x\"] -= 1\r\n if not isTruePosition(p1Board, p1FallingPiece):\r\n p1FallingPiece[\"rotation\"] = (p1FallingPiece[\"rotation\"] - 1) % len(PIECES[p1FallingPiece[\"shape\"]])\r\n #Same thing but CCW\r\n elif (event.key == K_v):\r\n p1FallingPiece[\"rotation\"] = (p1FallingPiece[\"rotation\"] - 1) % len(PIECES[p1FallingPiece[\"shape\"]])\r\n if not isTruePosition(p1Board, p1FallingPiece):\r\n if isTruePosition(p1Board, p1FallingPiece, adjX=1):\r\n p1FallingPiece[\"x\"] += 1\r\n if isTruePosition(p1Board, p1FallingPiece, adjX=-1):\r\n p1FallingPiece[\"x\"] -= 1\r\n if not isTruePosition(p1Board, p1FallingPiece):\r\n p1FallingPiece[\"rotation\"] = (p1FallingPiece[\"rotation\"] + 1) % len(PIECES[p1FallingPiece[\"shape\"]])\r\n #Makes the block go down faster; tallies up soft time and locks it at the bottom\r\n elif (event.key == K_s):\r\n p1MovingDown = True\r\n p1SoftTime = time.time()\r\n if isTruePosition(p1Board, p1FallingPiece, adjY=1):\r\n p1FallingPiece[\"y\"] += 1\r\n p1LastMoveDownTime = time.time()\r\n if not isTruePosition(p1Board, p1FallingPiece, adjY=1):\r\n p1Lock = True\r\n p1LastLockTime = time.time() - LOCKRATE\r\n p1LastFallTime = time.time() - p1FallRate\r\n #Drops the block to the bottom\r\n elif (event.key == K_w):\r\n p1MovingDown = False\r\n p1InstantDrop = True\r\n \r\n if time.time() - p2WaitTime > WAITRATE and p2Play:\r\n if event.type == KEYDOWN:\r\n #Moves the block left if valid\r\n if (event.key == K_LEFT) and isTruePosition(p2Board, p2FallingPiece, adjX=-1):\r\n p2FallingPiece[\"x\"] -= 1\r\n p2MovingLeft = True\r\n p2MovingRight = False\r\n p2GhostLeft = True\r\n p2LastMoveSidewaysTime = time.time()\r\n #Moves the block right if valid\r\n elif (event.key == K_RIGHT) and isTruePosition(p2Board, p2FallingPiece, adjX=1):\r\n p2FallingPiece[\"x\"] += 1\r\n p2MovingRight = True\r\n p2MovingLeft = False\r\n p2GhostRight = True\r\n p2LastMoveSidewaysTime = time.time()\r\n #Rotating the block CW if valid; checks to see if it can be pushed one space to the left or right if necessary\r\n elif (event.key == K_COMMA or event.key == K_SLASH):\r\n p2FallingPiece[\"rotation\"] = (p2FallingPiece[\"rotation\"] + 1) % len(PIECES[p2FallingPiece[\"shape\"]])\r\n if not isTruePosition(p2Board, p2FallingPiece):\r\n if isTruePosition(p2Board, p2FallingPiece, adjX=1):\r\n p2FallingPiece[\"x\"] += 1\r\n if isTruePosition(p2Board, p2FallingPiece, adjX=-1):\r\n p2FallingPiece[\"x\"] -= 1\r\n if not 
isTruePosition(p2Board, p2FallingPiece):\r\n p2FallingPiece[\"rotation\"] = (p2FallingPiece[\"rotation\"] - 1) % len(PIECES[p2FallingPiece[\"shape\"]])\r\n #Same thing but CCW\r\n elif (event.key == K_PERIOD):\r\n p2FallingPiece[\"rotation\"] = (p2FallingPiece[\"rotation\"] - 1) % len(PIECES[p2FallingPiece[\"shape\"]])\r\n if not isTruePosition(p2Board, p2FallingPiece):\r\n if isTruePosition(p2Board, p2FallingPiece, adjX=1):\r\n p2FallingPiece[\"x\"] += 1\r\n if isTruePosition(p2Board, p2FallingPiece, adjX=-1):\r\n p2FallingPiece[\"x\"] -= 1\r\n if not isTruePosition(p2Board, p2FallingPiece):\r\n p2FallingPiece[\"rotation\"] = (p2FallingPiece[\"rotation\"] + 1) % len(PIECES[p2FallingPiece[\"shape\"]])\r\n #Makes the block go down faster; tallies up soft time and locks it at the bottom\r\n elif (event.key == K_DOWN):\r\n p2MovingDown = True\r\n p2SoftTime = time.time()\r\n if isTruePosition(p2Board, p2FallingPiece, adjY=1):\r\n p2FallingPiece[\"y\"] += 1\r\n p2LastMoveDownTime = time.time()\r\n if not isTruePosition(p2Board, p2FallingPiece, adjY=1):\r\n p2Lock = True\r\n p2LastLockTime = time.time() - LOCKRATE\r\n p2LastFallTime = time.time() - p2FallRate\r\n #Drops the block to the bottom\r\n elif (event.key == K_UP):\r\n p2MovingDown = False\r\n p2InstantDrop = True\r\n \r\n if time.time() - p1WaitTime > WAITRATE and p1Play: \r\n #Same comment as above; checks to see how low the block can go\r\n if p1InstantDrop:\r\n for i in range(1, BHEIGHT):\r\n if not isTruePosition(p1Board, p1FallingPiece, adjY=i):\r\n break\r\n p1FallingPiece[\"y\"] += i - 1\r\n #I set up a block lock or unlock the piece depending on if it can move down\r\n if not isTruePosition(p1Board, p1FallingPiece, adjY=1) and not p1Lock and not (p1LockY == p1FallingPiece[\"y\"]):\r\n p1Lock = True\r\n p1LastLockTime = time.time()\r\n if isTruePosition(p1Board, p1FallingPiece, adjY=1):\r\n p1Lock = False\r\n p1LockY = p1FallingPiece[\"y\"]\r\n #I tally up the soft count\r\n if p1MovingDown and time.time() - p1SoftTime > 1/FPS:\r\n p1Soft += 1\r\n p1SoftTime = time.time()\r\n \r\n #Handles moving the block horizontally\r\n if (p1MovingLeft or p1MovingRight) and (time.time() - p1LastMoveSidewaysTime > MOVESIDEWAYSRATE or time.time() - p1LastMoveSidewaysTime == 0):\r\n if p1MovingLeft and isTruePosition(p1Board, p1FallingPiece, adjX=-1):\r\n p1FallingPiece[\"x\"] -= 1\r\n elif p1MovingRight and isTruePosition(p1Board, p1FallingPiece, adjX=1):\r\n p1FallingPiece[\"x\"] += 1\r\n else:\r\n p1LastFallTime = p1GhostFallTime\r\n \r\n #Handles the fall delay when moving horizontally\r\n if (p1MovingLeft and isTruePosition(p1Board, p1FallingPiece, adjX=-1)) or (p1MovingRight and isTruePosition(p1Board, p1FallingPiece, adjX=1)):\r\n p1LastFallTime = time.time()\r\n if p1MovingDown and (time.time() - p1LastMoveDownTime > MOVEDOWNRATE or time.time() - p1LastMoveDownTime == 0) and isTruePosition(p1Board, p1FallingPiece, adjY=1):\r\n p1FallingPiece[\"y\"] += 1\r\n \r\n #Check if it's time for the block to fall\r\n if time.time() - p1LastFallTime > p1FallRate:\r\n #See if it can go any lower\r\n if not isTruePosition(p1Board, p1FallingPiece, adjY=1):\r\n if time.time() - p1LastLockTime > LOCKRATE:\r\n #Stick the block on the board if it can't descend\r\n addToBoard(p1Board, p1FallingPiece)\r\n p1RemovedLines, p1Level, p1Combo, p1Bravo = removeWholeLines(p1Board, p1Level, p1Combo, p1Bravo)\r\n p1BaseLevel, p1FallRate, p1DropNum = getLevel(p1Level)\r\n p1FallingPiece = None\r\n #Block can go lower\r\n else:\r\n #I account for different 
difficulties\r\n if p1DropNum > 1:\r\n for i in range(0, p1DropNum):\r\n if isTruePosition(p1Board, p1FallingPiece, adjY=p1DropNum-i):\r\n p1FallingPiece[\"y\"] += p1DropNum - i\r\n break\r\n else:\r\n p1FallingPiece[\"y\"] += 1\r\n #I reset a few fall times\r\n p1LastFallTime = time.time()\r\n p1GhostFallTime = time.time()\r\n \r\n if time.time() - p2WaitTime > WAITRATE and p2Play: \r\n #Same comment as above; checks to see how low the block can go\r\n if p2InstantDrop:\r\n for i in range(1, BHEIGHT):\r\n if not isTruePosition(p2Board, p2FallingPiece, adjY=i):\r\n break\r\n p2FallingPiece[\"y\"] += i - 1\r\n #I set up a block lock or unlock the piece depending on if it can move down\r\n if not isTruePosition(p2Board, p2FallingPiece, adjY=1) and not p2Lock and not (p2LockY == p2FallingPiece[\"y\"]):\r\n p2Lock = True\r\n p2LastLockTime = time.time()\r\n if isTruePosition(p2Board, p2FallingPiece, adjY=1):\r\n p2Lock = False\r\n p2LockY = p2FallingPiece[\"y\"]\r\n #I tally up the soft count\r\n if p2MovingDown and time.time() - p2SoftTime > 1/FPS:\r\n p2Soft += 1\r\n p2SoftTime = time.time()\r\n \r\n #Handles moving the block horizontally\r\n if (p2MovingLeft or p2MovingRight) and (time.time() - p2LastMoveSidewaysTime > MOVESIDEWAYSRATE or time.time() - p2LastMoveSidewaysTime == 0):\r\n if p2MovingLeft and isTruePosition(p2Board, p2FallingPiece, adjX=-1):\r\n p2FallingPiece[\"x\"] -= 1\r\n elif p2MovingRight and isTruePosition(p2Board, p2FallingPiece, adjX=1):\r\n p2FallingPiece[\"x\"] += 1\r\n else:\r\n p2LastFallTime = p2GhostFallTime\r\n \r\n #Handles the fall delay when moving horizontally\r\n if (p2MovingLeft and isTruePosition(p2Board, p2FallingPiece, adjX=-1)) or (p2MovingRight and isTruePosition(p2Board, p2FallingPiece, adjX=1)):\r\n p2LastFallTime = time.time()\r\n if p2MovingDown and (time.time() - p2LastMoveDownTime > MOVEDOWNRATE or time.time() - p2LastMoveDownTime == 0) and isTruePosition(p2Board, p2FallingPiece, adjY=1):\r\n p2FallingPiece[\"y\"] += 1\r\n \r\n #Check if it's time for the block to fall\r\n if time.time() - p2LastFallTime > p2FallRate:\r\n #See if it can go any lower\r\n if not isTruePosition(p2Board, p2FallingPiece, adjY=1):\r\n if time.time() - p2LastLockTime > LOCKRATE:\r\n #Stick the block on the board if it can't descend\r\n addToBoard(p2Board, p2FallingPiece)\r\n p2RemovedLines, p2Level, p2Combo, p2Bravo = removeWholeLines(p2Board, p2Level, p2Combo, p2Bravo)\r\n p2BaseLevel, p2FallRate, p2DropNum = getLevel(p2Level)\r\n p2FallingPiece = None\r\n #Block can go lower\r\n else:\r\n #I account for different difficulties\r\n if p2DropNum > 1:\r\n for i in range(0, p2DropNum):\r\n if isTruePosition(p2Board, p2FallingPiece, adjY=p2DropNum-i):\r\n p2FallingPiece[\"y\"] += p2DropNum - i\r\n break\r\n else:\r\n p2FallingPiece[\"y\"] += 1\r\n #I reset a few fall times\r\n p2LastFallTime = time.time()\r\n p2GhostFallTime = time.time()\r\n\r\n#This creates the background; if you noticed up top it randomizes the background\r\nclass background(pygame.sprite.Sprite):\r\n def __init__(self, image_file, location):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = pygame.image.load(image_file)\r\n self.rect = self.image.get_rect()\r\n self.rect.left, self.rect.top = location\r\n\r\n#This draws the text on the screen\r\ndef makeTextObjs(text, font, COLOUR):\r\n surf = font.render(text, True, COLOUR)\r\n return surf, surf.get_rect()\r\n\r\n#Checks if the position the block wants to move into is actually available\r\ndef isTruePosition(board, block, adjX=0, adjY=0):\r\n 
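#A minimal usage sketch (hypothetical call sites): isTruePosition(board, piece, adjX=-1) asks whether\r\n    #the piece could sit one column to the left; isTruePosition(board, piece, adjY=1) asks whether it\r\n    #can fall one more row, which is how the game loop above decides when a piece should lock in place.\r\n    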
#Checks if the block is not colliding and in the board\r\n for x in range(TWIDTH):\r\n for y in range(THEIGHT):\r\n isAboveBoard = y + block[\"y\"] + adjY < 0\r\n if isAboveBoard or PIECES[block[\"shape\"]][block[\"rotation\"]][y][x] == BLANK:\r\n continue\r\n if not ((x + block[\"x\"] + adjX >= 0) and (x + block[\"x\"] + adjX < BWIDTH) and (y + block[\"y\"] + adjY < BHEIGHT)):\r\n return False\r\n if board[x + block[\"x\"] + adjX][y + block[\"y\"] + adjY] != BLANK:\r\n return False\r\n return True\r\n\r\n#Checks to see if the board is cleared\r\ndef isClear(board):\r\n y = BHEIGHT - 1\r\n for x in range(BWIDTH):\r\n if board[x][y] != BLANK:\r\n return False\r\n return True\r\n\r\n#Checks the rank of the player and displays it\r\ndef addRank(score, p):\r\n rank = \"S9\"\r\n if score < 120000:\r\n rank = \"S8\"\r\n if score < 100000:\r\n rank = \"S7\"\r\n if score < 82000:\r\n rank = \"S6\"\r\n if score < 66000:\r\n rank = \"S5\"\r\n if score < 52000:\r\n rank = \"S4\"\r\n if score < 40000:\r\n rank = \"S3\"\r\n if score < 30000:\r\n rank = \"S2\"\r\n if score < 22000:\r\n rank = \"S1\"\r\n if score < 16000:\r\n rank = \"1\"\r\n if score < 12000:\r\n rank = \"2\"\r\n if score < 8000:\r\n rank = \"3\"\r\n if score < 5500:\r\n rank = \"4\"\r\n if score < 3500:\r\n rank = \"5\"\r\n if score < 2000:\r\n rank = \"6\"\r\n if score < 1400:\r\n rank = \"7\"\r\n if score < 800:\r\n rank = \"8\"\r\n if score < 400:\r\n rank = \"9\"\r\n rankDisplay = MFONT.render(\"%s\" % rank, True, WHITE)\r\n rankBlock = rankDisplay.get_rect()\r\n if p == 1:\r\n rankBlock.topleft = (WIDTH / 2 - 91, 80)\r\n if p == 2:\r\n rankBlock.topleft = (WIDTH - 113, 80)\r\n SCREEN.blit(rankDisplay, rankBlock)\r\n\r\n#Displays text on the screen\r\ndef addTextScreen(text):\r\n #If it's the word Tetris I adjust the height a little\r\n titleDisplay, titleBlock = makeTextObjs(text, LFONT, WHITE)\r\n if text != \"Tetris\":\r\n titleBlock.center = (int(WIDTH / 2), int(HEIGHT / 2))\r\n else:\r\n titleBlock.center = (int(WIDTH / 2) , int(HEIGHT / 4))\r\n SCREEN.blit(titleDisplay, titleBlock)\r\n\r\n #I draw appropriate additional text\r\n if text == \"Tetris\":\r\n pressKeyDisplay, pressKeyBlock = makeTextObjs(\"P1: ASD to move, W to instant drop,\", SFONT, WHITE)\r\n pressKeyBlock.center = (int(WIDTH / 2), int(HEIGHT / 4) + 80)\r\n SCREEN.blit(pressKeyDisplay, pressKeyBlock)\r\n pressKeyDisplay, pressKeyBlock = makeTextObjs(\"CVB to rotate. 
P2: Arrow keys to move,\", SFONT, WHITE)\r\n        pressKeyBlock.center = (int(WIDTH / 2), int(HEIGHT / 4) + 105)\r\n        SCREEN.blit(pressKeyDisplay, pressKeyBlock)\r\n        pressKeyDisplay, pressKeyBlock = makeTextObjs(\"UP to instant drop, ,./ to rotate.\", SFONT, WHITE)\r\n        pressKeyBlock.center = (int(WIDTH / 2), int(HEIGHT / 4) + 130)\r\n    else:\r\n        pressKeyDisplay, pressKeyBlock = makeTextObjs(\"Press any key to continue.\", SFONT, WHITE)\r\n        pressKeyBlock.center = (int(WIDTH / 2), int(HEIGHT / 2) + 100)\r\n    SCREEN.blit(pressKeyDisplay, pressKeyBlock)\r\n\r\n    while checkForKeyPress() == None:\r\n        pygame.display.update()\r\n\r\n#I check the base level, fall rate, and the drop multiplier of the player\r\ndef getLevel(level):\r\n    baseLevel = (int(level / 100) + 1) * 100\r\n    fallRate = G\r\n    dropNum = 20\r\n    if level < 500:\r\n        dropNum = 3\r\n    if level < 450:\r\n        dropNum = 4\r\n    if level < 420:\r\n        dropNum = 5\r\n    if level < 400:\r\n        dropNum = 4\r\n    if level < 360:\r\n        dropNum = 3\r\n    if level < 330:\r\n        dropNum = 2\r\n    if level < 300:\r\n        dropNum = 1\r\n    if level < 251:\r\n        fallRate = G*16/15\r\n    if level < 247:\r\n        fallRate = G*8/7\r\n    if level < 243:\r\n        fallRate = G*16/9\r\n    if level < 239:\r\n        fallRate = G*8/5\r\n    if level < 236:\r\n        fallRate = G*2\r\n    if level < 233:\r\n        fallRate = G*32/13\r\n    if level < 230:\r\n        fallRate = G*32/9\r\n    if level < 220:\r\n        fallRate = G*64/3\r\n    if level < 200:\r\n        fallRate = G*4/3\r\n    if level < 170:\r\n        fallRate = G*8/5\r\n    if level < 160:\r\n        fallRate = G*32/17\r\n    if level < 140:\r\n        fallRate = G*2\r\n    if level < 120:\r\n        fallRate = G*32/15\r\n    if level < 100:\r\n        fallRate = G*32/13\r\n    if level < 90:\r\n        fallRate = G*8/3\r\n    if level < 80:\r\n        fallRate = G*32/9\r\n    if level < 70:\r\n        fallRate = G*16/3\r\n    if level < 60:\r\n        fallRate = G*32/5\r\n    if level < 50:\r\n        fallRate = G*8\r\n    if level < 40:\r\n        fallRate = G*32/3\r\n    if level < 35:\r\n        fallRate = G*16\r\n    if level < 30:\r\n        fallRate = G*64/3\r\n    return baseLevel, fallRate, dropNum\r\n\r\n
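# Editor-added sketch (not part of the original file): because each "if level < N" in
# getLevel above overwrites the previous assignment, the smallest matching threshold
# always wins, so the same mapping can be read as a table scanned from the lowest
# threshold upward. The rows below copy only the first few fallRate entries.
def getFallRateSketch(level, g):
    fallMultipliers = [(30, 64/3), (35, 16), (40, 32/3), (50, 8), (60, 32/5)]
    for threshold, multiplier in fallMultipliers:
        if level < threshold:
            return g * multiplier
    return g  # past the sketched table, keep plain gravity like the G default above

# Example: getFallRateSketch(32, 1.0) returns 16.0, matching getLevel's "if level < 35" row.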
#I draw text conveying information; the score, level, base level\r\ndef addStatus(score, level, baseLevel, p):\r\n    scoreDisplay = SFONT.render(\"Score: %s\" % score, True, WHITE)\r\n    scoreBlock = scoreDisplay.get_rect()\r\n    if p == 1:\r\n        scoreBlock.topleft = (WIDTH / 2 - 119, 20)\r\n    if p == 2:\r\n        scoreBlock.topleft = (WIDTH - 111, 20)\r\n    SCREEN.blit(scoreDisplay, scoreBlock)\r\n    #For this and base level, I account for the end of a player's game\r\n    if level > 999:\r\n        levelDisplay = SFONT.render(\"Level: 999\", True, WHITE)\r\n    else:\r\n        levelDisplay = SFONT.render(\"Level: %s\" % level, True, WHITE)\r\n    levelBlock = levelDisplay.get_rect()\r\n    if p == 1:\r\n        levelBlock.topleft = (WIDTH / 2 - 98, 50)\r\n    if p == 2:\r\n        levelBlock.topleft = (WIDTH - 120, 50)\r\n    SCREEN.blit(levelDisplay, levelBlock)\r\n\r\n    #Got to have the dividing line for aesthetics!\r\n    levelDisplay = SFONT.render(\"----\", True, WHITE)\r\n    levelBlock = levelDisplay.get_rect()\r\n    if p == 1:\r\n        levelBlock.topleft = (WIDTH / 2 - 36, 60)\r\n    if p == 2:\r\n        levelBlock.topleft = (WIDTH - 58, 60)\r\n    SCREEN.blit(levelDisplay, levelBlock)\r\n\r\n    if baseLevel > 999:\r\n        baseLevelDisplay = SFONT.render(\"999\", True, WHITE)\r\n    else:\r\n        baseLevelDisplay = SFONT.render(\"%s\" % baseLevel, True, WHITE)\r\n    baseLevelBlock = baseLevelDisplay.get_rect()\r\n    if p == 1:\r\n        baseLevelBlock.topleft = (WIDTH / 2 - 36, 70)\r\n    if p == 2:\r\n        baseLevelBlock.topleft = (WIDTH - 58, 70)\r\n    SCREEN.blit(baseLevelDisplay, baseLevelBlock)\r\n\r\n#Returns a random block, making sure it's not a near repeat\r\ndef getNewPiece(pList):\r\n    shape = random.choice(list(PIECES.keys()))\r\n    for num in range(0, len(pList)):\r\n        for part in range(0, len(pList)):\r\n            if shape == pList[part]:\r\n                shape = random.choice(list(PIECES.keys()))\r\n    pList.append(shape)\r\n    pList.remove(pList[0])\r\n    newPiece = {\"shape\": shape,\r\n                \"rotation\": 0,\r\n                \"x\": int(BWIDTH / 2) - int(TWIDTH / 2),\r\n                \"y\": -2,\r\n                \"COLOUR\": shape}\r\n    return newPiece\r\n\r\n#Fills in the board based on the last block that fell\r\ndef addToBoard(board, block):\r\n    for x in range(TWIDTH):\r\n        for y in range(THEIGHT):\r\n            if PIECES[block[\"shape\"]][block[\"rotation\"]][y][x] != BLANK:\r\n                board[x + block[\"x\"]][y + block[\"y\"]] = block[\"COLOUR\"]\r\n\r\n#Creates a new board\r\ndef getNewBoard():\r\n    board = []\r\n    for i in range(BWIDTH):\r\n        board.append([BLANK] * BHEIGHT)\r\n    return board\r\n\r\n#Draws the board; the border and translucent background of the \"Tetris\" area\r\ndef addBoard(board, p):\r\n    if p == 1:\r\n        pygame.draw.rect(SCREEN, BCOLOUR, (P1XMARGIN - 3, TOPMARGIN - 3, (BWIDTH * BSIZE) + 6, (BHEIGHT * BSIZE) + 6), 5)\r\n    if p == 2:\r\n        pygame.draw.rect(SCREEN, BCOLOUR, (P2XMARGIN - 3, TOPMARGIN - 3, (BWIDTH * BSIZE) + 6, (BHEIGHT * BSIZE) + 6), 5)\r\n    bg = pygame.Surface((BSIZE * BWIDTH, BSIZE * BHEIGHT), pygame.SRCALPHA)\r\n    bg.fill((0, 0, 0, 128))\r\n    if p == 1:\r\n        SCREEN.blit(bg, (P1XMARGIN, TOPMARGIN))\r\n    if p == 2:\r\n        SCREEN.blit(bg, (P2XMARGIN, TOPMARGIN))\r\n    for x in range(BWIDTH):\r\n        for y in range(BHEIGHT):\r\n            addBox(x, y, p, board[x][y])\r\n\r\n#Checks if a line is filled with blocks\r\ndef isWholeLine(board, y):\r\n    for x in range(BWIDTH):\r\n        if board[x][y] == BLANK:\r\n            return False\r\n    return True\r\n\r\n#Removes completed lines, moves them down, and calculates some score multipliers\r\ndef removeWholeLines(board, level, combo, bravo):\r\n    numLinesRemoved = 0\r\n    y = BHEIGHT - 1\r\n    while y >= 0:\r\n        if isWholeLine(board, y):\r\n            for pullDownY in range(y, 0, -1):\r\n                for x in range(BWIDTH):\r\n                    board[x][pullDownY] = board[x][pullDownY-1]\r\n            for x in range(BWIDTH):\r\n                board[x][0] = BLANK\r\n            numLinesRemoved += 1\r\n        else:\r\n            y -= 1\r\n    combo += (2 * numLinesRemoved) - 2\r\n    if not numLinesRemoved:\r\n        combo = 1\r\n        bravo = 1\r\n    if isClear(board):\r\n        bravo = 4\r\n    level += numLinesRemoved\r\n    return numLinesRemoved, level, combo, bravo\r\n\r\n
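# Editor-added illustration (hypothetical 3x3 board, not from the original file): like
# removeWholeLines above, this toy version scans from the bottom row up and only moves y
# when a row is NOT full, so stacked full rows collapse in a single pass. 1 marks an
# occupied cell and 0 plays the role of BLANK.
def clearFullRowsSketch(board):
    removed = 0
    y = len(board[0]) - 1
    while y >= 0:
        if all(column[y] != 0 for column in board):  # row y is completely filled
            for pullDownY in range(y, 0, -1):  # shift everything above down one row
                for column in board:
                    column[pullDownY] = column[pullDownY - 1]
            for column in board:  # blank out the new top row
                column[0] = 0
            removed += 1
        else:
            y -= 1
    return removed

# Example: clearFullRowsSketch([[0, 1, 1], [0, 1, 1], [0, 1, 1]]) returns 2, clearing both
# stacked full rows without skipping the one that slides down into place.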
#Checks for a key press and differentiates a KEYUP from KEYDOWN\r\ndef checkForKeyPress():\r\n    checkQuit()\r\n    for event in pygame.event.get([KEYDOWN, KEYUP]):\r\n        if event.type == KEYDOWN:\r\n            continue\r\n        return event.key\r\n    return None\r\n\r\n#I add the framework of a block; the backbones\r\ndef addPiece(block, p, pixelx=None, pixely=None):\r\n    shapeToDraw = PIECES[block[\"shape\"]][block[\"rotation\"]]\r\n    if pixelx == None and pixely == None:\r\n        pixelx, pixely = convertCoords(block[\"x\"], block[\"y\"], p)\r\n    for x in range(TWIDTH):\r\n        for y in range(THEIGHT):\r\n            if shapeToDraw[y][x] != BLANK:\r\n                addBox(None, None, p, block[\"COLOUR\"], pixelx + (x * BSIZE), pixely + (y * BSIZE))\r\n\r\n#Draws the next piece on the sidebar\r\ndef addNewPiece(block, p):\r\n    nextDisplay = SFONT.render(\"Next:\", True, WHITE)\r\n    nextBlock = nextDisplay.get_rect()\r\n    if p == 1:\r\n        nextBlock.topleft = (WIDTH / 2 - 81, 125)\r\n    if p == 2:\r\n        nextBlock.topleft = (WIDTH - 103, 125)\r\n    SCREEN.blit(nextDisplay, nextBlock)\r\n    if p == 1:\r\n        addPiece(block, p, pixelx=WIDTH/2-81, pixely=130)\r\n    if p == 2:\r\n        addPiece(block, p, pixelx=WIDTH-103, pixely=130)\r\n\r\n#Converts the xy coords of the board to the xy coords on the screen\r\ndef convertCoords(boxx, boxy, p):\r\n    if p == 1:\r\n        return (P1XMARGIN + (boxx * BSIZE)), (TOPMARGIN + (boxy * BSIZE))\r\n    if p == 2:\r\n        return (P2XMARGIN + (boxx * BSIZE)), (TOPMARGIN + (boxy * BSIZE))\r\n\r\n#Draws a single block out of 4, including all of the shading\r\ndef addBox(boxx, boxy, p, COLOUR, pixelx=None, pixely=None):\r\n    if COLOUR == BLANK:\r\n        return\r\n    if pixelx == None and pixely == None:\r\n        pixelx, pixely = convertCoords(boxx, boxy, p)\r\n    pygame.draw.rect(SCREEN, MCOLOURS[COLOUR], (pixelx + 1, pixely + 4, BSIZE - 1, BSIZE - 7))\r\n    pygame.draw.rect(SCREEN, COLOURS[COLOUR], (pixelx + 4, pixely + 4, BSIZE - 7, BSIZE - 7))\r\n    pygame.draw.rect(SCREEN, LCOLOURS[COLOUR], (pixelx + 1, pixely + 1, BSIZE - 1, 3))\r\n    pygame.draw.rect(SCREEN, DCOLOURS[COLOUR], (pixelx + 1, pixely + 17, BSIZE - 1, 3))\r\n    pygame.draw.rect(SCREEN, MCOLOURS[COLOUR], (pixelx + 1, pixely + 2, 1, BSIZE - 3))\r\n    pygame.draw.rect(SCREEN, MCOLOURS[COLOUR], (pixelx + 2, pixely + 3, 1, BSIZE - 5))\r\n    pygame.draw.rect(SCREEN, MCOLOURS[COLOUR], (pixelx + 19, pixely + 2, 1, BSIZE - 3))\r\n    pygame.draw.rect(SCREEN, MCOLOURS[COLOUR], (pixelx + 18, pixely + 3, 1, BSIZE - 5))\r\n\r\n#Checks to see if the user wants to quit or not\r\ndef checkQuit():\r\n    for event in pygame.event.get(QUIT):\r\n        close()\r\n    for event in pygame.event.get(KEYUP):\r\n        if event.key == K_ESCAPE:\r\n            close()\r\n        pygame.event.post(event)\r\n\r\n#Closes the game\r\ndef close():\r\n    pygame.quit()\r\n    sys.exit()\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","sub_path":"TetrisMulti.py","file_name":"TetrisMulti.py","file_ext":"py","file_size_in_byte":43857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"405816928","text":"from peutil import *\n\ndef getam(x):\n\tl = factors(x)\n\treturn sum(l[:-1])\n\nd = {}\nfor i in range(1,10000):\n\td[i]=getam(i)\nprint(sum([k for k in d if d[k] in d and d[k]!=k and d[d[k]]==k]))","sub_path":"problems01_25/p21.py","file_name":"p21.py","file_ext":"py","file_size_in_byte":186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"253429935","text":"import traceback\n\nimport maya.cmds as cmds\n\nimport Qt\n\n# Maya 2017+\nif Qt.__binding__ == 'PySide2':\n    from Qt.QtWidgets import *\n    from Qt.QtGui import *\n    from Qt.QtCore import *\n\n# Maya 2014-2016\nelif Qt.__binding__ == 'PySide':\n    from Qt.QtGui import *\n    from Qt.QtCore import *\n\nimport plugins\nimport utilities\nfrom utilities import widgets, scene, system, objects\n\n\n# VISIBLE_ICON = QIcon('%s/visible.png' % os.environ['TOOL_ICON_PATH'])\n# INVISIBLE_ICON = QIcon('%s/invisible.png' % os.environ['TOOL_ICON_PATH'])\n\nVISIBLE_ICON = QIcon('%s/visible.png' % 'test')\nINVISIBLE_ICON = QIcon('%s/invisible.png' % 'test')\n\n\nclass Gofer(widgets.Window, plugins.Tool):\n    def __init__(self):\n        label = 'Gofer'\n\n        plugins.Tool.__init__(self, 'goferTool')\n        widgets.Window.__init__(self, label)\n\n        self.setLabel(label)\n        self.setPips(['xlrd'])\n        self.setMenuPath('The Crew')\n\n        self.__targetData = None\n\n        self._settingAction = QAction('Asset File Settings', self)\n        self._reloadAction = QAction('Reload Asset Data', self)\n\n        self._partTree = PartTree(self.__targetData)\n        self.loadConfigBtn = QPushButton('Load Configuration')\n\n        self.buildUI()\n        self.connectUI()\n\n    def buildUI(self):\n        menuBar = QMenuBar(self)\n 
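 # Editor's note: because this tool is a widget-based window (widgets.Window) rather
 # than a QMainWindow, the menu bar is attached through layout.setMenuBar() below.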
optionMenu = menuBar.addMenu('Options')\n optionMenu.addAction(self._settingAction)\n optionMenu.addAction(self._reloadAction)\n\n btnLayout = QHBoxLayout()\n btnLayout.addWidget(self.loadConfigBtn)\n\n layout = QVBoxLayout()\n layout.setMenuBar(menuBar)\n layout.addWidget(self._partTree)\n layout.addLayout(btnLayout)\n self.setLayout(layout)\n\n def connectUI(self):\n self.loadConfigBtn.clicked.connect(self.loadConfig)\n self._settingAction.triggered.connect(self.showSettings)\n self._reloadAction.triggered.connect(self.loadAssetData)\n\n def start(self, *args):\n if self.__targetData is None:\n self.__targetData = TargetData()\n self.__targetData.findTargetNode()\n widgets.Window.show(self)\n plugins.Tool.start(self, *args)\n\n def showSettings(self):\n settings = TargetDataSettings(self.__targetData)\n settings.SettingsSaved.connect(self.loadAssetData)\n settings.show()\n\n def loadAssetData(self):\n if not self.__targetData.value(self.__targetData.storedFileAttr):\n return\n\n try:\n self.__targetData.parseFile()\n except:\n traceback.print_exc()\n system.LOG.error('An error occurred when loading asset data. See script editor for details.')\n widgets.error('Something went wrong when loading asset data. See script editor for details.')\n\n self._partTree.clear()\n self._partTree.setColumnCount(0)\n headerLbls = ['', ''] + self.__targetData.partHeaders()\n self._partTree.setHeaderLabels(headerLbls)\n self._partTree.setSortingEnabled(False)\n\n for p in self.__targetData.partList():\n item = PartItem(p, headerLbls)\n self._partTree.addTopLevelItem(item)\n\n for g in p.geometry():\n geoItem = GeometryItem(g)\n item.addChild(geoItem)\n\n for i in range(len(headerLbls)):\n self._partTree.resizeColumnToContents(i)\n\n self._partTree.setSortingEnabled(True)\n\n def loadConfig(self):\n ui = ConfigSelector(self.__targetData)\n if not ui.exec_():\n return\n\n # Rip through the selected configuration and apply it.\n toLoad = []\n if ui.standardParts():\n system.LOG.info('Including Standard Parts')\n toLoad += ui.activeConfig().standardParts()\n\n if ui.optionParts():\n system.LOG.info('Including Optional Parts')\n toLoad += ui.activeConfig().optionalParts()\n\n if ui.accessoryParts():\n system.LOG.info('Including Accessory Parts')\n toLoad += ui.activeConfig().accessoryParts()\n\n # Unfortunately, we have to run through parts twice. Once to turn\n # it all off, once to turn parts back on. 
Because parts can share\n # geometry, if we do it all at once, we could have an incorrect result.\n for i in range(self._partTree.topLevelItemCount()):\n item = self._partTree.topLevelItem(i)\n item.setVisible(False)\n\n for i in range(self._partTree.topLevelItemCount()):\n item = self._partTree.topLevelItem(i)\n if item.part() not in toLoad:\n continue\n\n item.setVisible(True)\n\n\nclass ConfigSelector(QDialog):\n def __init__(self, target):\n QDialog.__init__(self)\n self.setWindowTitle('Load Configuration')\n self._activeConfig = None\n\n self.__target = target\n self._configTree = widgets.Tree()\n self._configTree.setHeaderLabels(target.configHeaders())\n self._okBtn = QPushButton('Load')\n\n self._stndChk = QCheckBox('Standard')\n self._optChk = QCheckBox('Options')\n self._accChk = QCheckBox('Accessories')\n\n self.buildUI()\n self.connectUI()\n self.loadConfigs()\n\n def buildUI(self):\n layout = QVBoxLayout()\n\n btnLayout = QHBoxLayout()\n btnLayout.addStretch()\n btnLayout.addWidget(self._okBtn)\n\n partGrp = QGroupBox('Parts')\n partLayout = QHBoxLayout()\n partLayout.addWidget(self._stndChk)\n partLayout.addWidget(self._optChk)\n partLayout.addWidget(self._accChk)\n partGrp.setLayout(partLayout)\n\n layout.addWidget(self._configTree)\n layout.addWidget(partGrp)\n layout.addLayout(btnLayout)\n\n self.setLayout(layout)\n\n def connectUI(self):\n self._okBtn.clicked.connect(self.accept)\n self._configTree.doubleClicked.connect(self.accept)\n\n def accept(self):\n if not self._configTree.selectedItems():\n return\n self._activeConfig = self._configTree.selectedItems()[0].config()\n QDialog.accept(self)\n\n def loadConfigs(self):\n self._configTree.clear()\n self._configTree.setSortingEnabled(False)\n\n for c in self.__target.configList():\n config = ConfigTreeItem(c, self.__target.configHeaders())\n self._configTree.addTopLevelItem(config)\n\n for i in range(len(self.__target.configHeaders())):\n self._configTree.resizeColumnToContents(i)\n\n self._configTree.setSortingEnabled(True)\n\n def activeConfig(self):\n return self._activeConfig\n\n def standardParts(self):\n return self._stndChk.isChecked()\n\n def optionParts(self):\n return self._optChk.isChecked()\n\n def accessoryParts(self):\n return self._accChk.isChecked()\n\n\nclass ConfigTreeItem(QTreeWidgetItem):\n def __init__(self, config, headerList):\n QTreeWidgetItem.__init__(self)\n self.__config = config\n self.__colKeys = headerList\n\n def data(self, column, role):\n if role == Qt.DisplayRole or role == Qt.EditRole:\n return self.__config.value(self.__colKeys[column])\n return QTreeWidgetItem.data(self, column, role)\n\n def config(self):\n return self.__config\n\n\nclass PartTree(widgets.Tree):\n def __init__(self, target):\n widgets.Tree.__init__(self)\n self.setSelectionMode(QAbstractItemView.ExtendedSelection)\n\n self._addGeoAction = QAction('Add Selected Geometry', self)\n self._addGeoAction.triggered.connect(self.addGeometry)\n self._remGeoAction = QAction('Remove Selected Geometry', self)\n self._remGeoAction.triggered.connect(self.removeGeometry)\n\n self.__target = target\n\n def buildContextMenu(self, menu):\n self._addGeoAction.setEnabled(bool(cmds.ls(sl=True)))\n menu.addAction(self._addGeoAction)\n self._remGeoAction.setEnabled(bool(self.selectedItems()))\n menu.addAction(self._remGeoAction)\n\n widgets.Tree.buildContextMenu(self, menu)\n\n def addGeometry(self):\n for sl in self.selectedItems():\n if not isinstance(sl, PartItem):\n continue\n\n for obj in cmds.ls(sl=True):\n if 
sl.part().addGeometry(obj):\n geoItem = GeometryItem(obj)\n sl.addChild(geoItem)\n\n def removeGeometry(self):\n for sl in self.selectedItems():\n if not isinstance(sl, GeometryItem):\n continue\n\n if sl.parent().part().removeGeometry(sl.text(0)):\n sl.parent().takeChild(sl.parent().indexOfChild(sl))\n\n def mousePressEvent(self, event):\n pos = event.pos()\n column = self.columnAt(pos.x())\n item = self.itemAt(pos)\n\n if event.button() == Qt.LeftButton:\n if isinstance(item, PartItem):\n if column == item.VisColumn:\n self.toggleVisible(item)\n return\n\n widgets.Tree.mousePressEvent(self, event)\n\n def toggleVisible(self, item):\n visible = item.part().isVisible()\n if item.isSelected():\n for sl in self.selectedItems():\n sl.setVisible(not visible)\n\n else:\n item.setVisible(not visible)\n\n\nclass PartItem(QTreeWidgetItem):\n VisColumn = 1\n\n def __init__(self, part, headerList):\n QTreeWidgetItem.__init__(self)\n\n self.__part = part\n self.__colKeys = headerList\n self.__visIcons = [INVISIBLE_ICON, VISIBLE_ICON]\n self.__isVisible = self.__part.isVisible()\n\n def data(self, column, role):\n if role == Qt.DecorationRole:\n if column == self.VisColumn:\n return self.__visIcons[int(self.isVisible())]\n\n if role == Qt.DisplayRole or role == Qt.EditRole:\n return self.__part.value(self.__colKeys[column])\n\n if role == Qt.SizeHintRole:\n if column == 0:\n return QSize(40, 20)\n\n return QTreeWidgetItem.data(self, column, role)\n\n def part(self):\n return self.__part\n\n def setVisible(self, visible):\n if not self.part().setVisible(visible):\n return\n\n self.__isVisible = visible\n self.emitDataChanged()\n\n def isVisible(self):\n return self.part().isVisible()\n\n\nclass GeometryItem(QTreeWidgetItem):\n def __init__(self, geo):\n QTreeWidgetItem.__init__(self)\n self.__geo = geo\n\n def data(self, column, role):\n if role == Qt.DisplayRole or role == Qt.EditRole:\n if column == 0:\n self.setFirstColumnSpanned(True)\n return self.__geo\n\n return QTreeWidgetItem.data(self, column, role)\n\n\nclass TargetDataSettings(widgets.Window):\n SettingsSaved = Signal()\n\n def __init__(self, target):\n self.__target = target\n\n widgets.Window.__init__(self, 'Asset Data Settings')\n self.setWindowModality(Qt.ApplicationModal)\n\n self.selectedFileEdit = QLineEdit(target.value(target.storedFileAttr))\n self.selectedFileEdit.setReadOnly(True)\n self.browseBtn = QPushButton('Load File')\n self.selectedSheetEdit = QLineEdit(target.value(target.sheetTitleAttr))\n\n # Column Ranges\n self.idColEdit = QLineEdit(target.value(target.idColAttr))\n self.dataColStartEdit = QLineEdit(target.value(target.dataStartColAttr))\n self.dataColEndEdit = QLineEdit(target.value(target.dataEndColAttr))\n\n self.configColStartEdit = QLineEdit(target.value(target.configStartColAttr))\n self.configColEndEdit = QLineEdit(target.value(target.configEndColAttr))\n target = TargetData()\n\n # Row Ranges\n self.headerRowEdit = QLineEdit(target.value(target.headerDataRowAttr))\n self.configHeaderStartEdit = QLineEdit(target.value(target.configDataStartRowAttr))\n self.configHeaderEndEdit = QLineEdit(target.value(target.configDataEndRowAttr))\n self.partStartRowEdit = QLineEdit(target.value(target.partStartRowAttr))\n self.partEndRowEdit = QLineEdit(target.value(target.partEndRowAttr))\n\n self.saveBtn = QPushButton('Save Settings')\n self.saveBtn.setMaximumWidth(200)\n\n self.buildUI()\n self.connectUI()\n\n def buildUI(self):\n layout = QVBoxLayout()\n browseLayout = QHBoxLayout()\n\n 
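 # Editor's note: browseLayout pairs the read-only path field with its Load File button
 # on one row; the path is only ever changed through the file dialog in loadFile() below.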
browseLayout.addWidget(self.selectedFileEdit)\n browseLayout.addWidget(self.browseBtn)\n\n sheetGrp = QGroupBox('Sheet')\n sheetLayout = QFormLayout()\n sheetLayout.addRow('Configuration Sheet Title', self.selectedSheetEdit)\n sheetGrp.setLayout(sheetLayout)\n\n\n dataColGrp = QGroupBox('Data Columns')\n dataColLayout = QFormLayout()\n dataColLayout.addRow('ID Column', self.idColEdit)\n dataColLayout.addRow('Data Start Column', self.dataColStartEdit)\n dataColLayout.addRow('Data End Column', self.dataColEndEdit)\n dataColGrp.setLayout(dataColLayout)\n\n configColGrp = QGroupBox('Configuration Columns')\n configColLayout = QFormLayout()\n configColLayout.addRow('Config Start Column', self.configColStartEdit)\n configColLayout.addRow('Config End Column', self.configColEndEdit)\n configColGrp.setLayout(configColLayout)\n\n dataLayout = QHBoxLayout()\n dataLayout.addWidget(dataColGrp)\n dataLayout.addWidget(configColGrp)\n\n dataRowGrp = QGroupBox('Data Rows')\n rowDataLayout = QHBoxLayout()\n leftFormLayout = QFormLayout()\n leftFormLayout.addRow('Header Data Row', self.headerRowEdit)\n leftFormLayout.addRow('Part Start Row', self.partStartRowEdit)\n leftFormLayout.addRow('Part End Row', self.partEndRowEdit)\n\n rightFormLayout = QFormLayout()\n rightFormLayout.addRow('Config Header Start Row', self.configHeaderStartEdit)\n rightFormLayout.addRow('Config Header End Row', self.configHeaderEndEdit)\n\n rowDataLayout.addLayout(leftFormLayout)\n rowDataLayout.addLayout(rightFormLayout)\n dataRowGrp.setLayout(rowDataLayout)\n\n layout.addLayout(browseLayout)\n layout.addWidget(sheetGrp)\n layout.addLayout(dataLayout)\n layout.addWidget(dataRowGrp)\n layout.addWidget(self.saveBtn)\n layout.setAlignment(self.saveBtn, Qt.AlignHCenter)\n layout.setStretch(2, 100)\n self.setLayout(layout)\n\n def connectUI(self):\n self.browseBtn.clicked.connect(self.loadFile)\n self.saveBtn.clicked.connect(self.saveAndClose)\n\n def loadFile(self):\n newFile = QFileDialog.getOpenFileName(widgets.mayaMainWindow(), 'Load File',\n filter='Excel Files (*xls *.xlsx)')\n newFile = newFile[0]\n if not newFile:\n return\n\n if newFile.lower() == self.selectedFileEdit.text().lower():\n return\n\n self.selectedFileEdit.setText(newFile)\n\n def saveAndClose(self):\n self.__target = TargetData()\n self.__target.setValue(self.__target.storedFileAttr, self.selectedFileEdit.text())\n self.__target.setValue(self.__target.sheetTitleAttr, self.selectedSheetEdit.text())\n self.__target.setValue(self.__target.dataStartColAttr, self.dataColStartEdit.text().strip())\n self.__target.setValue(self.__target.dataEndColAttr, self.dataColEndEdit.text().strip())\n self.__target.setValue(self.__target.configStartColAttr, self.configColStartEdit.text().strip())\n self.__target.setValue(self.__target.configEndColAttr, self.configColEndEdit.text().strip())\n self.__target.setValue(self.__target.headerDataRowAttr, self.headerRowEdit.text().strip())\n self.__target.setValue(self.__target.partStartRowAttr, self.partStartRowEdit.text().strip())\n self.__target.setValue(self.__target.partEndRowAttr, self.partEndRowEdit.text().strip())\n self.__target.setValue(self.__target.configDataStartRowAttr, self.configHeaderStartEdit.text().strip())\n self.__target.setValue(self.__target.configDataEndRowAttr, self.configHeaderEndEdit.text().strip())\n self.__target.setValue(self.__target.idColAttr, self.idColEdit.text().strip())\n\n self.SettingsSaved.emit()\n self.close()\n\n\nclass TargetData(object):\n targetNodeName = 'GoferTarget'\n storedFileAttr = 
'activeFile'\n sheetTitleAttr = 'activeSheet'\n dataStartColAttr = 'dataStartColumn'\n dataEndColAttr = 'dataEndColumn'\n configStartColAttr = 'configStartColumn'\n configEndColAttr = 'configEndColumn'\n headerDataRowAttr = 'headerDataRow'\n partStartRowAttr = 'partStartRow'\n partEndRowAttr = 'partEndRow'\n configDataStartRowAttr = 'configDataStartRow'\n configDataEndRowAttr = 'configDataEndRow'\n idColAttr = 'idColumn'\n\n def __init__(self):\n object.__init__(self)\n self.__targetNodeUUID = None\n self.__workbook = None\n self.__assetSheet = None\n self.__idLabel = None\n self.__partList = []\n self.__partHeaders = []\n self.__configList = []\n self.__configHeaders = []\n\n self.findTargetNode()\n\n def findTargetNode(self):\n if not cmds.ls(self.targetNodeName, r=True):\n target = cmds.createNode('transform', name=self.targetNodeName)\n self.__targetNodeUUID = cmds.ls(target, uuid=True)[0]\n\n else:\n self.__targetNodeUUID = cmds.ls(self.targetNodeName, r=True, uuid=True)[0]\n\n attrList = [self.storedFileAttr, self.sheetTitleAttr, self.dataStartColAttr, self.dataEndColAttr,\n self.configStartColAttr, self.configEndColAttr, self.headerDataRowAttr,\n self.partStartRowAttr, self.partEndRowAttr, self.configDataStartRowAttr,\n self.configDataEndRowAttr, self.idColAttr]\n\n for a in attrList:\n self.setValue(a, None)\n\n def targetNode(self, name=False):\n try:\n if name:\n return cmds.ls(self.__targetNodeUUID, l=True)[0]\n return self.__targetNodeUUID\n except Exception as e:\n system.LOG.error(e)\n return None\n\n def value(self, attr):\n if cmds.objExists('%s.%s' % (self.targetNode(True), attr)):\n return cmds.getAttr('%s.%s' % (self.targetNode(True), attr))\n return ''\n\n def setValue(self, attr, value):\n scene.addAttr(self.targetNode(True), attr, value, scene.AttributeTypes.String,\n keyable=False, hidden=False, lock=False)\n\n def parseFile(self):\n import xlrd\n system.LOG.debug('Parsing File: %s' % self.value(self.storedFileAttr))\n\n self.__workbook = xlrd.open_workbook(self.value(self.storedFileAttr))\n self.__assetSheet = self.__workbook.sheet_by_name(self.value(self.sheetTitleAttr))\n self.__partList = []\n\n # Build config list\n configHeaderStart = int(self.value(self.configDataStartRowAttr)) - 1\n configHeaderEnd = int(self.value(self.configDataEndRowAttr))\n configColStart = utilities.col2num(self.value(self.configStartColAttr)) - 1\n configColEnd = utilities.col2num(self.value(self.configEndColAttr))\n\n for col in range(configColStart, configColEnd):\n config = Configuration(self)\n\n # Get config information\n for row in range(configHeaderStart, configHeaderEnd):\n config.setValue(str(row), self.__assetSheet.cell(row, col).value)\n\n self.__configList.append(config)\n\n # Build config headers\n for row in range(configHeaderStart, configHeaderEnd):\n self.__configHeaders.append(str(row))\n\n # Set part ID label\n dataRow = int(self.value(self.headerDataRowAttr)) - 1\n idCol = utilities.col2num(self.value(self.idColAttr)) - 1\n self.__idLabel = self.__assetSheet.cell(dataRow, idCol).value\n\n # Build part list\n dataRow = int(self.value(self.headerDataRowAttr)) - 1\n partRowStart = int(self.value(self.partStartRowAttr)) - 1\n partRowEnd = int(self.value(self.partEndRowAttr))\n partColStart = utilities.col2num(self.value(self.dataStartColAttr)) - 1\n partColEnd = utilities.col2num(self.value(self.dataEndColAttr))\n\n for row in range(partRowStart, partRowEnd):\n part = Part(self)\n for col in range(partColStart, partColEnd):\n header = self.__assetSheet.cell(dataRow, 
col).value\n                if header:\n                    # Mark the value of the ID\n                    if header == self.__idLabel:\n                        part.setValue(part.InternalKey, self.__assetSheet.cell(row, col).value)\n\n                    # Set every other value\n                    part.setValue(header, self.__assetSheet.cell(row, col).value)\n\n            self.__partList.append(part)\n\n            # Loop through all configs and link the parts accordingly\n            for col in range(configColStart, configColEnd):\n                avail = self.__assetSheet.cell(row, col).value\n\n                # The configs should be built in order\n                self.__configList[col - configColStart].addPart(part, avail)\n\n        # Build part headers\n        for col in range(partColStart, partColEnd):\n            self.__partHeaders.append(self.__assetSheet.cell(dataRow, col).value)\n\n    def assetSheet(self):\n        return self.__assetSheet\n\n    def workbook(self):\n        return self.__workbook\n\n    def partList(self):\n        return self.__partList\n\n    def partHeaders(self):\n        return self.__partHeaders\n\n    def configList(self):\n        return self.__configList\n\n    def configHeaders(self):\n        return self.__configHeaders\n\n    def idLabel(self):\n        return self.__idLabel\n\n\nclass dataObject(object):\n    def __init__(self):\n        object.__init__(self)\n\n        self.__attr = {}\n\n    def value(self, attr):\n        if attr in self.__attr:\n            return self.__attr[attr]\n        return ''\n\n    def setValue(self, attr, value):\n        self.__attr[attr] = value\n\n    def allAttrs(self):\n        return self.__attr\n\n\nclass Part(dataObject):\n    Delimiter = ','\n    InternalKey = '_internalID'\n\n    def __init__(self, target):\n        dataObject.__init__(self)\n        self.__target = target\n        self.__geometry = []\n        self.__isDirty = True\n\n    def __contains__(self, item):\n        return self.value(self.InternalKey) == item\n\n    def addGeometry(self, obj):\n        try:\n            idLabel = self.__target.idLabel()\n            if not idLabel:\n                system.LOG.error('Unable to add \"%s\" to part, no ID column set.' % obj)\n                return False\n\n            partValue = self.value(idLabel)\n            idLabel = 'ID_%s' % idLabel\n\n            scene.addAttr(obj, idLabel, None, scene.AttributeTypes.String)\n            currentValues = cmds.getAttr('%s.%s' % (obj, idLabel))\n\n            # I think I found a little bug! Apparently, if a string attribute is empty,\n            # Maya wants to return a None instead of an empty string. Which means we\n            # have to do an extra check. Yay.\n\n            # TODO: Clean this up, you lazy bastard.\n            if not currentValues:\n                currentValues = ''\n\n            currentValues = currentValues.split(self.Delimiter)\n            if partValue in currentValues:\n                return False\n\n            currentValues.append(partValue)\n            scene.addAttr(obj, idLabel, self.Delimiter.join(currentValues), scene.AttributeTypes.String)\n            self.__isDirty = True\n            return True\n\n        except Exception as e:\n            traceback.print_exc()\n            system.LOG.error('Unable to add \"%s\" to part, see script editor for details.' % obj)\n            return False\n\n    def removeGeometry(self, obj):\n        try:\n            idLabel = self.__target.idLabel()\n            if not idLabel:\n                system.LOG.error('Unable to remove \"%s\" from part, no ID column set.' % obj)\n                return False\n\n            partValue = self.value(idLabel)\n            idLabel = 'ID_%s' % idLabel\n\n            if not cmds.objExists('%s.%s' % (obj, idLabel)):\n                return True\n\n            currentValues = cmds.getAttr('%s.%s' % (obj, idLabel))\n            if not currentValues:\n                return True\n\n            currentValues = currentValues.split(self.Delimiter)\n            if partValue in currentValues:\n                currentValues.remove(partValue)\n\n            scene.addAttr(obj, idLabel, self.Delimiter.join(currentValues), scene.AttributeTypes.String)\n            self.__isDirty = True\n            return True\n\n        except Exception as e:\n            traceback.print_exc()\n            system.LOG.error('Unable to remove \"%s\" from part, see script editor for details.' 
% obj)\n return False\n\n def geometry(self):\n if not self.__isDirty:\n return self.__geometry\n\n try:\n idLabel = self.__target.idLabel()\n if not idLabel:\n system.LOG.error('Unable to get geometry, no ID column set.')\n return False\n\n partValue = self.value(idLabel)\n idLabel = 'ID_%s' % idLabel\n\n self.__geometry = scene.findNodesByAttrValue(idLabel, partValue, scene.AttributeTypes.String, True)\n self.__isDirty = False\n return self.__geometry\n\n except Exception as e:\n traceback.print_exc()\n system.LOG.error('Unable to get geometry, see script editor for details.')\n self.__isDirty = True\n return []\n\n def isVisible(self):\n geoList = self.geometry()\n if not geoList:\n return False\n\n for g in geoList:\n if cmds.getAttr('%s.v' % g):\n return True\n\n return False\n\n def setVisible(self, visible):\n geoList = self.geometry()\n if not geoList:\n return False\n\n for g in geoList:\n cmds.setAttr('%s.v' % g, visible)\n\n return True\n\n\nclass Availability(objects.Enum):\n Standard = 's'\n Option = 'a'\n Accessory = 'acc'\n\n\nclass Configuration(dataObject):\n def __init__(self, target):\n dataObject.__init__(self)\n self.__target = target\n self.__standard = []\n self.__optional = []\n self.__accessory = []\n self.__isDirty = True\n\n def addPart(self, part, availability):\n availability = availability.lower().strip()\n\n if availability == Availability.Standard:\n self.__standard.append(part)\n return\n\n if availability == Availability.Option:\n self.__optional.append(part)\n return\n\n if availability == Availability.Accessory:\n self.__accessory.append(part)\n return\n\n def standardParts(self):\n return self.__standard\n\n def optionalParts(self):\n return self.__optional\n\n def accessoryParts(self):\n return self.__accessory\n\n\nplugins.registerPlugin(Gofer)","sub_path":"python/tools/gofer.py","file_name":"gofer.py","file_ext":"py","file_size_in_byte":26965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"7909202","text":"#!/usr/bin/env python3\n\nimport sys\nimport PIL.Image\nimport PIL.ImageChops\nfrom typing import Iterable\n\n\ndef fit_width(img, expected_width: int):\n 'Filter out identical columns from the image to fit the expected width.'\n img = img.transpose(PIL.Image.TRANSPOSE)\n pixels = bytes(img.getdata())\n columns = [\n pixels[offset:offset + img.width]\n for offset in range(0, len(pixels), img.width)\n ]\n for threshold in 100, 80, 60, 40, 20, 15, 10, 5, 4, 3, 2:\n columns = unique(columns, threshold)\n if len(columns) <= expected_width:\n padding_width = expected_width - len(columns)\n columns.insert(0, padding_width * columns[0])\n img.putdata(b''.join(columns))\n img = img.transpose(PIL.Image.TRANSPOSE)\n return img.crop((0, 0, expected_width, img.height))\n\n\ndef unique(columns: Iterable, threshold: int=0):\n 'Remove adjacent duplicates that repeat more than given number of times.'\n new_columns = []\n for column in columns:\n if not new_columns or new_columns[-1] != column:\n same_columns = 0\n elif same_columns == threshold:\n continue\n else:\n same_columns += 1\n new_columns.append(column)\n return new_columns\n\n\ndef join_img(head, *tail, vertical=False):\n 'Stack multiple images into a single one, horizontally or vertically.'\n for img in tail:\n if vertical:\n offset = (0, head.height)\n new_size = max(head.width, img.width), head.height + img.height\n else:\n offset = (head.width, 0)\n new_size = head.width + img.width, max(head.height, img.height)\n head = head.crop((0, 0, 
*new_size))\n head.paste(img, offset)\n return head\n\n\ndef decompose(img):\n 'Break down the image into parts.'\n components = {\n 'header_left': (0, 0, 800, 240),\n 'header_right': (img.width - 80, 0, img.width, 240),\n 'body': (0, 240, img.width, img.height - 30),\n 'body_left': (0, 240, 75, img.height - 30),\n 'body_right': (75, 240, img.width, img.height - 30),\n 'footer': (0, img.height - 30, img.width, img.height),\n }\n return type('parts', (), {k: img.crop(v) for k, v in components.items()})\n\n\ndef main(path):\n 'Trim whitespaces in different parts of the image and join them back.'\n img = PIL.Image.open(path)\n parts = decompose(img)\n header = join_img(parts.header_left, parts.header_right, vertical=False)\n min_footer_width = PIL.ImageChops.invert(parts.footer).getbbox()[2]\n assert min_footer_width <= header.width, 'Footer too long'\n footer = parts.footer.crop((0, 0, header.width, parts.footer.height))\n expected_body_right = header.width - parts.body_left.width\n parts.body_right = fit_width(parts.body_right, expected_body_right)\n assert parts.body_right, 'Diagram too large'\n body = join_img(parts.body_left, parts.body_right, vertical=False)\n join_img(header, body, footer, vertical=True).save(path)\n\n\nif __name__ == '__main__':\n for path in sys.argv[1:]:\n try:\n main(path)\n except AssertionError as e:\n print(path, e.args[0])\n","sub_path":"trim.py","file_name":"trim.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"73378812","text":"from datetime import datetime\nfrom sqlalchemy.types import TypeDecorator, TEXT\nimport json\nimport parsedatetime\n\n\ndef parse_human_datetime(s):\n \"\"\"\n Use the parsedatetime lib to return ``datetime.datetime`` from human\n generated strings\n\n >>> parse_human_datetime(\"now\") <= datetime.now()\n True\n \"\"\"\n cal = parsedatetime.Calendar()\n return dttm_from_timtuple(cal.parse(s)[0])\n\n\ndef dttm_from_timtuple(d):\n return datetime(\n d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.tm_min, d.tm_sec)\n\n\ndef parse_human_timedelta(s):\n \"\"\"\n Use the parsedatetime lib to return ``datetime.datetime`` from human\n generated strings\n\n >>> parse_human_datetime(\"now\") <= datetime.now()\n True\n \"\"\"\n cal = parsedatetime.Calendar()\n dttm = dttm_from_timtuple(datetime.now().timetuple())\n d = cal.parse(s, dttm)[0]\n d = datetime(\n d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.tm_min, d.tm_sec)\n return d - dttm\n\n\n\nclass JSONEncodedDict(TypeDecorator):\n \"\"\"Represents an immutable structure as a json-encoded string.\"\"\"\n impl = TEXT\n def process_bind_param(self, value, dialect):\n if value is not None:\n value = json.dumps(value)\n\n return value\n\n def process_result_value(self, value, dialect):\n if value is not None:\n value = json.loads(value)\n return value\n","sub_path":"panoramix/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"223072704","text":"\nimport platform\n\nfrom qtpy import QtWidgets, QtGui, QtCore\n\nfrom ztp.trader.vtGlobal import globalSetting\nfrom ztp.trader.vtFunction import loadIconPath\n\n\nif not hasattr(QtCore, 'pyqtSignal'):\n QtCore.pyqtSignal = QtCore.Signal\n\n\nBASIC_FONT = None\ntry:\n family = globalSetting['fontFamily']\n size = globalSetting['fontSize']\n BASIC_FONT = QtGui.QFont(family, size)\nexcept:\n BASIC_FONT = QtGui.QFont(u'微软雅黑', 12)\n\ndef 
createQApp():\n    \"\"\"Create the PyQt application object\"\"\"\n    qApp = QtWidgets.QApplication([])\n\n    if globalSetting['darkStyle']:\n        try:\n            import qdarkstyle\n            qApp.setStyleSheet(qdarkstyle.load_stylesheet_from_environment())\n        except ImportError:\n            pass\n\n    if 'Windows' in platform.uname():\n        import ctypes\n        ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID('ztp')\n\n    qApp.setFont(BASIC_FONT)\n\n    qApp.setWindowIcon(QtGui.QIcon(loadIconPath('ztp.ico')))\n\n    return qApp\n","sub_path":"ztp/trader/uiQt.py","file_name":"uiQt.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"138084911","text":"\nimport re\nthislist=['p']\n# Complete the solve function below.\ndef solve(s):\n    n=[]\n    l=len(s)\n    n.insert(0,s[0].upper())\n    for i in range(1,l):\n        if s[i-1]==\" \":\n            n.insert(i,s[i].upper())\n        else:\n            n.insert(i,s[i])\n    n=\"\".join(n)\n    return n\n\n\n","sub_path":"capitalize.py","file_name":"capitalize.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"1210660","text":"import redis\nimport json\nimport time\nfrom queue_functions import *\n\nzmienna = \"wartosc\"\n\ndef test_co():\n    # queuing_system_client = redis.StrictRedis(host='localhost', charset='utf-8', port=6381, db=1)\n    queuing_system_client = redis.StrictRedis(host='localhost', charset='utf-8', port=6381, db=0)\n    # queuing_system_client.flushdb()\n    queue_to_write_to = \"the_queue\"\n\n    message_to_write_to_queue_as_dict = {}\n    message_to_write_to_queue_as_dict[\"pole1\"] = \"wartosc1\"\n    message_to_write_to_queue_as_dict[\"pole2\"] = \"wartosc2\"\n\n    queuing_system_client.rpush(queue_to_write_to, json.dumps(message_to_write_to_queue_as_dict))\n    queuing_system_client.rpush(queue_to_write_to, json.dumps(message_to_write_to_queue_as_dict))\n\n    queue_to_read_from = \"the_queue\"\n    queue_batch = queuing_system_client.lrange(queue_to_read_from, 0, 1)\n    for value_read_from_queue in queue_batch:\n        value_read_from_queue_as_dict = json.loads(value_read_from_queue)\n        print('value_read_from_queue_as_dict: ', value_read_from_queue_as_dict)\n\n\nif __name__ == \"__main__\":\n    # test_co()\n\n    queue_name = \"queue_1\"\n\n    for i in range(3):\n        dummy_dict = get_dummy_randomized_dict()\n        push_dict_to_queue(dummy_dict, queue_name)\n\n    printout_queue(queue_name)\n    print()\n\n    pull_whole_queue_v2(queue_name)\n    printout_queue(queue_name)\n    print()\n\n    time.sleep(120)\n\n    printout_queue(queue_name)\n    print()\n    clear_whole_db()\n\n    for i in range(4):\n        dummy_dict = get_dummy_randomized_dict(i)\n        push_dict_to_queue(dummy_dict, queue_name)\n\n    printout_queue(queue_name)\n    print()\n    clear_whole_db()\n\n    for i in range(5):\n        dummy_dict = get_dummy_randomized_dict()\n        push_dict_to_queue(dummy_dict, queue_name)\n\n    printout_queue(queue_name)\n    print()\n    whole_queue_batch = pull_whole_queue(queue_name)\n    printout_queue(queue_name)\n    print(\"whole_queue_batch: \", whole_queue_batch)\n","sub_path":"wtiproj/wtiproj01_client.py","file_name":"wtiproj01_client.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
{"seq_id":"464491414","text":"import requests\nfrom greent import node_types\nfrom greent.graph_components import KNode, LabeledID\nfrom greent.service import Service\nfrom greent.util import Text, LoggingUtil\nimport logging,json\n\nlogger = LoggingUtil.init_logging(__name__, logging.DEBUG)\n\nclass 
MyChem(Service):\n\n def __init__(self, context):\n super(MyChem, self).__init__(\"mychem\", context)\n\n def get_adverse_events(self,drug_node):\n #Don't need to worry about paging in this one, since we'll just return one drug (the one we're asking for)\n #and mychem pages by drug.\n chemblids = drug_node.get_synonyms_by_prefix('CHEMBL')\n if len(chemblids) == 0:\n logger.warn('no chembl ids')\n return_results = []\n for cid in chemblids:\n ident = Text.un_curie(cid)\n murl = f'{self.url}query?q=chembl.molecule_hierarchy.molecule_chembl_id:{ident}&fields=aeolus'\n result = requests.get(murl).json()\n for hit in result['hits']:\n #import json\n #print(json.dumps(hit,indent=4))\n if 'aeolus' in hit:\n aeolus = hit['aeolus']\n if 'outcomes' in aeolus:\n for outcome in aeolus['outcomes']:\n #I think it makes sense to do some filtering here. I don't want anything unless the lower\n # CI bound is > 1, and if I have enough counts (at least 5)\n if outcome['case_count'] <=5:\n continue\n if min(outcome['prr_95_ci']) > 1:\n predicate = LabeledID(identifier=\"RO:0003302\",label= \"causes_or_contributes_to\")\n elif max(outcome['prr_95_ci']) < 1:\n predicate = LabeledID(identifier=\"RO:0002559\",label= \"prevents\")\n else:\n continue\n meddra_id = f\"MedDRA:{outcome['meddra_code']}\"\n obj_node = KNode(meddra_id, type=node_types.DISEASE_OR_PHENOTYPE, name=outcome['name'])\n props={'prr':outcome['prr'], 'ror': outcome['ror'], 'case_count': outcome['case_count']}\n edge = self.create_edge(drug_node, obj_node, 'mychem.get_adverse_events', cid, predicate, url = murl, properties=props)\n return_results.append( (edge, obj_node) )\n if 'indications' in aeolus:\n for indication in aeolus['indications']:\n if indication['count'] < 25:\n continue\n predicate = LabeledID(identifier=\"RO:0002606\", label = \"treats\")\n meddra_id = f\"MedDRA:{outcome['meddra_code']}\"\n obj_node = KNode(meddra_id, type=node_types.DISEASE_OR_PHENOTYPE, name=outcome['name'])\n edge = self.create_edge(drug_node, obj_node, 'mychem.get_adverse_events', cid, predicate, url = murl, properties=props)\n return_results.append( (edge, obj_node) )\n return return_results\n\n def get_drugcentral(self,drug_node):\n #Don't need to worry about paging in this one, since we'll just return one drug (the one we're asking for)\n #and mychem pages by drug.\n chemblids = drug_node.get_synonyms_by_prefix('CHEMBL')\n if len(chemblids) == 0:\n logger.warn('no chembl ids')\n return_results = []\n for cid in chemblids:\n ident = Text.un_curie(cid)\n murl = f'{self.url}query?q=chembl.molecule_hierarchy.molecule_chembl_id:{ident}&fields=drugcentral'\n result = requests.get(murl).json()\n for hit in result['hits']:\n if 'drugcentral' in hit:\n dc = hit['drugcentral']\n for ci in dc['drug_use']['contraindication']:\n if 'umls_cui' not in ci:\n continue\n predicate = LabeledID(identifier=\"DrugCentral:0000001\", label=\"contraindication\")\n umls = f\"UMLS:{ci['umls_cui']}\"\n obj_node = KNode(umls, type=node_types.DISEASE_OR_PHENOTYPE, name=ci['concept_name'])\n edge = self.create_edge(drug_node, obj_node, 'mychem.get_drugcentral', cid, predicate, url=murl )\n return_results.append( (edge, obj_node) )\n for ind in dc['drug_use']['indication']:\n if 'umls_cui' not in ind:\n continue\n predicate = LabeledID(identifier=\"RO:0002606\", label=\"treats\")\n umls = f\"UMLS:{ind['umls_cui']}\"\n obj_node = KNode(umls, type=node_types.DISEASE_OR_PHENOTYPE, name=ind['concept_name'])\n edge = self.create_edge(drug_node, obj_node, 'mychem.get_drugcentral', cid, 
predicate, url = murl)\n return_results.append( (edge, obj_node) )\n return return_results\n\n def query(self,url):\n result = requests.get(url).json()\n return result\n\n def page_calls(self,url,nper):\n newurl=url+f'&size={nper}'\n response = self.query(newurl)\n if 'hits' not in response:\n return []\n all_hits = response['hits']\n num_hits = response['total']\n while len(all_hits) < num_hits:\n lall = len(all_hits)\n print(lall, num_hits)\n url_page = newurl+f'&from={len(all_hits)}'\n response = self.query(url_page)\n if 'hits' in response:\n all_hits += response['hits']\n return all_hits\n\n def get_drug_from_adverse_events(self,input_node):\n \"\"\"Given a node (drug or phenotype), find chemicals that have a high or low rate of causing the node\n concept as an adverse event\"\"\"\n meddras = input_node.get_labeled_ids_by_prefix('MedDRA')\n return_results = []\n for meddra in meddras:\n mname = meddra.label\n murl = f'{self.url}query?q=aeolus.outcomes.name:{mname}'\n hits = self.page_calls(murl,100)\n for hit in hits:\n #import json\n #print(json.dumps(hit,indent=4))\n if 'aeolus' in hit:\n aeolus = hit['aeolus']\n for outcome in aeolus['outcomes']:\n #I think it makes sense to do some filtering here. I don't want anything unless the lower\n # CI bound is > 1, and if I have enough counts (at least 5)\n if (outcome['name'] != mname):\n continue\n print(outcome['name'], outcome['case_count'], outcome['prr_95_ci'])\n if outcome['case_count'] > 5 and min(outcome['prr_95_ci']) > 1:\n predicate = LabeledID(identifier=\"RO:0003302\", label=\"causes_or_contributes_to\")\n elif outcome['case_count'] > 5 and max(outcome['prr_95_ci']) < 1:\n predicate = LabeledID(identifier=\"RO:0002559\", label=\"prevents\")\n else:\n continue\n drug_node=self.make_drug_node(hit)\n if drug_node is None:\n continue\n #obj_node = KNode(meddra_id, type=node_types.DISEASE_OR_PHENOTYPE, name=outcome['name'])\n props={'prr':outcome['prr'], 'ror': outcome['ror'], 'case_count': outcome['case_count']}\n edge = self.create_edge(drug_node, input_node, 'mychem.get_adverse_events', mname , predicate, url = murl, properties=props)\n return_results.append( (edge, drug_node) )\n return return_results\n\n def make_drug_node(self,hit_element):\n \"\"\"Given a 'hit' from the mychem result, construct a drug node. Try to get it with chembl, and if that\n fails, chebi. 
Failing that, complain bitterly.\"\"\"\n if 'chembl' in hit_element:\n chembl=hit_element['chembl']\n return KNode(f\"CHEMBL:{chembl['molecule_chembl_id']}\", type=node_types.DRUG, name=chembl['pref_name'])\n if 'chebi' in hit_element:\n chebi = hit_element['chebi']\n return KNode(chebi['chebi_id'], type=node_types.DRUG, name=chebi['chebi_name'])\n logger.error('hit from mychem.info did not return a chembl or a chebi element')\n logger.error(f'got these keys: {list(hit_element.keys())}')\n return None\n\n","sub_path":"greent/services/mychem.py","file_name":"mychem.py","file_ext":"py","file_size_in_byte":8737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"120226403","text":"\"\"\"Run a chain of analysis tools and linters: isort → black → pylint → bandit.\"\"\"\n\nfrom subprocess import run\n\n\ndef run_with_separator(args):\n \"\"\"Print a horizontal rule to console and run a subprocess.\"\"\"\n print(\"=\" * 80)\n return run(args, check=False)\n\n\nif __name__ == \"__main__\":\n isort = run_with_separator(\n [\n \"isort\",\n \".\",\n \"--atomic\",\n \"--combine-as\",\n \"--combine-star\",\n \"--multi-line=3\",\n \"--trailing-comma\",\n \"--force-grid-wrap=0\",\n \"--use-parentheses\",\n \"--line-width=88\",\n ]\n )\n\n if isort.returncode == 0:\n black = run_with_separator([\"black\", \".\"])\n\n if black.returncode == 0:\n run_with_separator(\n [\"pylint\", \"--extension-pkg-whitelist=lxml.etree\", \"plateypus\"]\n )\n run_with_separator(\n [\"bandit\", \"--recursive\", \"--format\", \"txt\", \"plateypus\"]\n )\n","sub_path":"scripts/lint.py","file_name":"lint.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"482157072","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom haversine import Unit, haversine\n\npd.set_option(\"display.max_rows\", 500)\npd.set_option(\"display.max_columns\", 500)\npd.set_option(\"display.width\", 500)\n\nDATA_TAXI = r\"C:\\Users\\alewz\\Google Drive\\programming\\projects_al\\first_pytourch\\PYTORCH_NOTEBOOKS\\Data\\NYCTaxiFares.csv\"\n\n# variables ############################################################################################################\n\ncols_number: list[str] = [\n \"pickup_longitude\",\n \"pickup_latitude\",\n \"dropoff_longitude\",\n \"dropoff_latitude\",\n \"passenger_count\",\n \"distance\",\n]\ncols_categories: list[str] = [\n \"hours\",\n \"am_or_pm\",\n \"day_of_week\",\n]\n\n# features engineering #################################################################################################\n\nif __name__ == \"__main__\":\n taxi = pd.read_csv(DATA_TAXI)\n print(taxi.head())\n print(taxi.dtypes)\n\n # haversine distance from log and lat\n\n taxi[\"pickup\"] = list(zip(taxi[\"pickup_longitude\"], taxi[\"pickup_latitude\"]))\n taxi[\"dropoff\"] = list(zip(taxi[\"dropoff_longitude\"], taxi[\"dropoff_latitude\"]))\n\n vhaversine = np.vectorize(haversine)\n taxi[\"distance\"] = vhaversine(\n taxi.pickup.values, taxi.dropoff.values, unit=Unit.MILES\n )\n\n # parsing datetime\n taxi[\"pickup_datetime\"] = pd.to_datetime(taxi[\"pickup_datetime\"])\n taxi[\"pickup_datetime\"] = taxi[\"pickup_datetime\"] - pd.Timedelta(hours=4)\n taxi[\"hours\"] = taxi[\"pickup_datetime\"].dt.hour\n taxi[\"am_or_pm\"] = np.where(taxi[\"hours\"] > 12, \"pm\", \"am\")\n taxi[\"day_of_week\"] = taxi[\"pickup_datetime\"].dt.strftime(\"%a\")\n\n 
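    # Editor's spot check (hypothetical timestamp, not from the dataset): the datetime
    # features above can be verified on a single value, e.g.
    #   ts = pd.Timestamp("2010-04-19 08:17:56") - pd.Timedelta(hours=4)
    #   ts.hour -> 4, the np.where rule -> "am", ts.strftime("%a") -> "Mon"
    # Note that an hour of exactly 12 is labelled "am" by the "hours > 12" rule.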
for col in cols_categories:\n taxi[col] = taxi[col].astype(\"category\")\n y_col = [\"fare_amount\"]\n\n features = taxi[cols_categories + cols_number + y_col]\n target: pd.Series = taxi[y_col]\n\n print(features.dtypes)\n print(features.head())\n\n taxi.to_pickle(\"taxi_cleaned\")\n\n sns.distplot(target)\n plt.show()\n print(target.describe())\n","sub_path":"nbs/pt_tabular/cleaning.py","file_name":"cleaning.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"56737527","text":"def create_nvme(self):\n '\\n Create NVMe service\\n '\n nvme_create = netapp_utils.zapi.NaElement('nvme-create')\n if (self.parameters.get('status_admin') is not None):\n options = {\n 'is-available': self.parameters['status_admin'],\n }\n nvme_create.translate_struct(options)\n try:\n self.server.invoke_successfully(nvme_create, enable_tunneling=True)\n except netapp_utils.zapi.NaApiError as error:\n self.module.fail_json(msg=('Error creating nvme for vserver %s: %s' % (self.parameters['vserver'], to_native(error))))","sub_path":"Data Set/bug-fixing-5/6661f17ce380879b015777f46338354fe10b608f--bug.py","file_name":"6661f17ce380879b015777f46338354fe10b608f--bug.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"636420042","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom zoomit_accounts.models import ProfileImage\nfrom zoomit_comments.foms import UserCommentForm, UserCommentReplyForm\nfrom zoomit_comments.models import UserComment, UserCommentReply\nfrom zoomit_posts.models import Post\n\n\n@login_required(login_url='/login')\ndef post_comment(request):\n user_comment_form = UserCommentForm(request.POST or None)\n\n user_image_path: ProfileImage = ProfileImage.objects.filter(user_id=request.user.id).first()\n user_image = user_image_path.image\n\n if user_comment_form.is_valid():\n post_id = user_comment_form.cleaned_data.get('post')\n body = user_comment_form.cleaned_data.get('body')\n\n comment = UserComment.objects.create(user_id=request.user.id, post_id=post_id, body=body, user_image=user_image)\n if comment:\n post = Post.objects.get(id=post_id)\n return redirect(post.get_absolute_url())\n\n\n@login_required(login_url='/login')\ndef comment_reply(request):\n comment_reply_form = UserCommentReplyForm(request.POST or None)\n\n user_image_path: ProfileImage = ProfileImage.objects.filter(user_id=request.user.id).first()\n user_image = user_image_path.image\n\n if comment_reply_form.is_valid():\n post_id = request.POST.get('post_id')\n comment_id = request.POST.get('comment_id')\n body = comment_reply_form.cleaned_data.get('body')\n\n reply: UserCommentReply = UserCommentReply.objects.create(user_id=request.user.id, comment_id=comment_id,\n body=body, user_image=user_image)\n post = Post.objects.get(id=post_id)\n if reply:\n print(reply)\n return redirect(post.get_absolute_url())","sub_path":"zoomit_comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"270945300","text":"import sys\nimport subprocess\n\n\n'''\n To Do:\n 1) Add check for > 2 arguments\n'''\n\n\ndef run_update(in_alias, out_alias):\n if in_alias == \"--\" and out_alias == \"--\":\n print(\"Error: Please indicate at least one 
outsource repository to link with.\")\n        exit(1)\n    else:\n        subprocess.check_call([\"bash\", \"-c\", \"./arg_tester.sh \" + in_alias + \" \" + out_alias])\n\n\nif __name__ == '__main__':\n    try:\n        run_update(sys.argv[1], sys.argv[2])\n    except OSError as e:\n        print(\"Bash script execution failed:\", e)\n","sub_path":"git_sync_v1.1.py","file_name":"git_sync_v1.1.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"597963810","text":"#!/bin/env python3\r\n\r\nimport tkinter as tk\r\nfrom cmath import *\r\nfrom math import *\r\nfrom PIL import ImageTk, Image\r\nfrom tkinter import filedialog as dial\r\nimport matplotlib\r\nmatplotlib.use(\"TkAgg\")\r\nfrom matplotlib import pyplot as plt\r\nfrom Fonction_bordure_2 import *\r\nfrom Animation import *\r\n \r\n''' CIRCLE TECHNIQUE: for each candidate position, look for the radius of the largest circle that fits inside the circuit => ideal racing line. Pixels are tested via polygons that approximate circles.''' \r\n \r\n \r\n \r\nclass Car():\r\n \r\n    def __init__(self, pos0, v0):\r\n        self.pos = pos0\r\n        self.v = v0 \r\n\r\nclass Circuit(tk.Toplevel):\r\n \r\n    def __init__(self):\r\n        plt.gca().invert_yaxis()\r\n        self.plotx,self.ploty = [],[]\r\n        \r\n        tk.Toplevel.__init__(self)\r\n        self.resizable(width=\"FALSE\",height=\"FALSE\")\r\n        \r\n        self.ButtonFrame = tk.Frame(self) # For the buttons\r\n        \r\n        self.circuit = tk.Canvas(self, width = 500, height = 300, bg=\"white\") # Creation of the canvas where the circuit will be drawn\r\n        self.circuit.grid(row=0,column=0)\r\n        self.ButtonFrame.grid(row=1,column=0)\r\n        \r\n        self.carIm = None\r\n        self.circuitIm = None\r\n        self.v0 = 5\r\n        \r\n        self.error = 1 # for the speed adjustment if the car leaves the circuit\r\n        \r\n        self.normvit = 5\r\n        self.carsize = (26,19)\r\n        \r\n        self.defbuttons()\r\n        self.stop = False\r\n        self.liste_pixel =[]\r\n        \r\n        self.mainloop()\r\n    \r\n    def defbuttons(self):\r\n        # Buttons and information labels\r\n        self.importB = tk.Button(self.ButtonFrame, text='Importer un circuit', command=self.lecture_circuit)\r\n        self.importB.grid(row=0,column=0)\r\n        \r\n        self.importT = tk.Label(self.ButtonFrame)\r\n        self.importT.grid(row=0,column=1)\r\n        \r\n        self.bagnoleB = tk.Button(self.ButtonFrame, text=\"Voiture au point de départ\", command=self.bagnole, state=tk.DISABLED)\r\n        self.bagnoleB.grid(row=1,column=0)#, columnspan=2)\r\n        \r\n        self.animB = tk.Button(self.ButtonFrame, text=\"Animation et calculs\", command=self.animation, state=tk.DISABLED)\r\n        self.animB.grid(row=1,column=1)#, columnspan=2)\r\n        \r\n        self.stopB = tk.Button(self.ButtonFrame, text=\"Stop et afficher le graphe\", command=self.stop_graphe)\r\n        self.stopB.grid(row=0, column=2)#, columnspan=2)\r\n        \r\n        self.bordureB = tk.Button(self.ButtonFrame, text='Afficher la chaussée', command=self.bordure, state = tk.DISABLED)\r\n        self.bordureB.grid(row=1, column=2)\r\n    \r\n    def motion(self,event):\r\n        # Function that lets the user move the car\r\n        x, y = event.x, event.y\r\n        \r\n        self.circuit.delete(\"car\") # We erase the car\r\n        \r\n        self.circuit.create_image(x, y, image=self.carIm, anchor=tk.CENTER, tags = \"car\")\r\n    \r\n    def placer_voiture(self,event):\r\n        # Function that lets the user fix the car's position\r\n        x, y = event.x, event.y\r\n        \r\n        if self.circuitPic.getpixel((x,y))[0] == 0: # The car may only be placed on the circuit (black pixels)\r\n            self.circuit.unbind('<Motion>') # We unbind the canvas from the events\r\n            self.circuit.unbind('<Button-1>')\r\n            \r\n            self.carpos0 = complex(x,y) # We record the car's initial position\r\n            \r\n            self.circuit.bind('<Motion>', self.droitedep) # Now we draw the initial velocity vector\r\n            self.circuit.bind('<Button-1>', self.create_car)\r\n    \r\n    
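 # Editor's sketch of the complex-number convention used in this class: a point (x, y)
 # is stored as complex(x, y), and cmath's rect()/phase() convert a raw mouse offset into
 # a fixed-speed velocity with the same heading, as droitedep() does just below, e.g.
 #   v = complex(3, 4)              # raw offset, norm 5
 #   rect(5, phase(v)) -> (3+4j)    # same direction, norm forced to normvit = 5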
évenements\r\n self.circuit.unbind('')\r\n \r\n self.carpos0 = complex(x,y) # On enregistre la position initiale de la voiture\r\n \r\n self.circuit.bind('', self.droitedep) # A présent on trace le vecteur vitesse initiale\r\n self.circuit.bind('', self.create_car)\r\n \r\n def droitedep(self, event):\r\n # L'utilisateur crée le vecteur vitesse intial\r\n x, y = event.x, event.y\r\n vit = complex(x-self.carpos0.real, y-self.carpos0.imag)\r\n self.v0 = rect(self.normvit, phase(vit)) # On crée un vecteur qui a une norme normvit et qui pointe dans la bonne direction\r\n \r\n self.circuit.delete(\"v0\") # On affiche le vecteur\r\n self.circuit.create_line(self.carpos0.real, self.carpos0.imag, self.carpos0.real + self.v0.real*10, self.carpos0.imag+self.v0.imag*10, fill=\"red\", tags = \"v0\", arrow = \"last\")\r\n \r\n self.circuit.delete(\"car\") \r\n \r\n # On tourne la voiture\r\n carPicrot = Image.new(\"RGBA\", (self.carsize[0],self.carsize[0]))\r\n im = self.carPic.convert('RGBA')\r\n rot = im.rotate(-degrees(phase(self.v0)), expand=True)\r\n carPicrot.paste(rot, ((self.carsize[0]-rot.size[0])//2, (self.carsize[0]-rot.size[1])//2), rot )\r\n self.carIm = ImageTk.PhotoImage(carPicrot)\r\n \r\n self.circuit.create_image(self.carpos0.real, self.carpos0.imag, image=self.carIm, anchor=tk.CENTER, tags = \"car\")\r\n \r\n def create_car(self, event):\r\n # On crée la voiture\r\n self.circuit.unbind('') # On désensibilise le canvas des évenements\r\n self.circuit.unbind('')\r\n \r\n self.car = Car(self.carpos0, self.v0)\r\n self.bagnoleB.config(state=tk.DISABLED)\r\n self.animB.config(state=tk.NORMAL)\r\n \r\n def bagnole(self):\r\n # Importation de la voiture\r\n self.circuit.delete(\"v0\") # On efface les vecteurs vitesses (si c'est pas la première fois qu'on place la voiture\r\n \r\n self.carPic = Image.open(\"car.gif\").resize((self.carsize[0],self.carsize[1]),Image.ANTIALIAS)\r\n self.carIm = ImageTk.PhotoImage(self.carPic) # Importation de l'image redimensionnée\r\n \r\n self.importB.config(state=tk.DISABLED) # On ne peut plus changer de circuit\r\n \r\n self.circuit.bind('', self.motion) # On rend le canvas 'sensible' aux déplacements de souris\r\n self.circuit.bind('', self.placer_voiture) # On rend le canvas 'sensible' aux clicks de souris\r\n \r\n \r\n def lecture_circuit(self):\r\n # On lit l'image\r\n file = dial.askopenfile(mode='r',filetypes=[('images JPG', '.jpg')])\r\n try:\r\n self.circuitPic = Image.open(file.name)\r\n self.circuitIm = ImageTk.PhotoImage(self.circuitPic)\r\n #self.bordure()\r\n self.importT.config(text=\"Patientez...\") # On avertit l'utilisateur que ça risque de prendre du temps...\r\n self.update()\r\n self.trace_circuit()\r\n except AttributeError:\r\n pass\r\n else:\r\n self.importT.config(text=file.name) # On avertit l'utilisateur que c'est bon en affichant le nom de l'image\r\n \r\n def trace_circuit(self):\r\n # Trace circuit sur canvas\r\n self.circuit.config(width=self.circuitIm.width(), height = self.circuitIm.height()) # On redimmensionne le canvas\r\n \r\n self.circuit.create_image(0, 0, image=self.circuitIm, anchor=tk.NW, tags =\"circuit\")\r\n \r\n #self.bordure()\r\n \r\n self.bagnoleB.config(state=tk.NORMAL) # On peut à présent tracer la voiture\r\n self.bordureB.config(state=tk.NORMAL) # Ou encore afficher les bordures\r\n \r\n def bordure(self):\r\n for i in range(self.circuitIm.width()):\r\n self.liste_pixel.append([])\r\n for j in range(self.circuitIm.height()):\r\n self.liste_pixel[-1].append(self.circuitPic.getpixel((i,j))[0])\r\n 
contour(self.liste_pixel)\r\n\r\n \r\n def refresh(self):\r\n self.circuit.delete(\"car\") \r\n \r\n # On tourne la voiture\r\n carPicrot = Image.new(\"RGBA\", (self.carsize[0],self.carsize[0]))\r\n im = self.carPic.convert('RGBA')\r\n rot = im.rotate(-degrees(phase(self.car.v)), expand=True)\r\n carPicrot.paste(rot, ((self.carsize[0]-rot.size[0])//2, (self.carsize[0]-rot.size[1])//2), rot )\r\n self.carIm = ImageTk.PhotoImage(carPicrot)\r\n \r\n self.circuit.create_image(self.car.pos.real, self.car.pos.imag, image=self.carIm, anchor=tk.CENTER, tags = \"car\")\r\n \r\n def stop_graphe(self):\r\n self.stop = True\r\n \r\n def animation(self):\r\n if self.stop:\r\n self.stopB.config(state=tk.DISABLED)\r\n self.animB.config(state=tk.NORMAL)\r\n plt.show()\r\n else:\r\n self.circuit.delete(\"v0\")\r\n animer(self)\r\n self.animB.config(state=tk.DISABLED)\r\n self.after(1,self.animation)\r\n \r\nif __name__=='__main__':\r\n fen = Circuit()","sub_path":"init (2).py","file_name":"init (2).py","file_ext":"py","file_size_in_byte":8463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"574226847","text":"from Point import Point\r\nimport random\r\nimport matplotlib.pyplot as plt\r\n\r\n#判断p4是否在p1,p2,p3构成的三角形中\r\ndef istriangle(a,b,c,p):\r\n\r\n signOfTrig = (b.x - a.x)*(c.y - a.y) - (b.y - a.y)*(c.x - a.x);\r\n signOfAB = (b.x - a.x)*(p.y - a.y) - (b.y - a.y)*(p.x - a.x);\r\n signOfCA = (a.x - c.x)*(p.y - c.y) - (a.y - c.y)*(p.x - c.x);\r\n signOfBC = (c.x - b.x)*(p.y - b.y) - (c.y - b.y)*(p.x - b.x);\r\n\r\n d1 = (signOfAB * signOfTrig > 0)\r\n d2 = (signOfCA * signOfTrig > 0)\r\n d3 = (signOfBC * signOfTrig > 0)\r\n\r\n return d1 and d2 and d3\r\n\r\ndef generate_data(size,data,data_o):\r\n for i in range(size):\r\n #x=random.randint(0,100)\r\n #y=random.randint(0,100)\r\n x=random.uniform(0,100)\r\n y=random.uniform(0,100)\r\n p=Point(x,y)\r\n data_o.append(p)\r\n return data,data_o\r\n\r\ndef d(A,B,P):\r\n ans=(B.x - A.x)*(P.y - A.y) - (B.y - A.y)*(P.x - A.x)\r\n return ans\r\n\r\n\r\n\r\ndef BruteForceCH(size,data,data_o):\r\n \r\n L=[]\r\n R=[]\r\n flag = [0 for x in range(size)]\r\n\r\n for i in range(size-3):\r\n if flag[i]==1:\r\n continue\r\n for j in range(i+1,size-2):\r\n if flag[j]==1:\r\n continue\r\n for k in range(j+1,size-1):\r\n if flag[k]==1:\r\n continue\r\n for l in range(k+1,size):\r\n if flag[l]==1:\r\n continue\r\n elif istriangle(data_o[i],data_o[j],data_o[k],data_o[l]):\r\n flag[l]=1\r\n continue\r\n elif istriangle(data_o[l],data_o[j],data_o[k],data_o[i]):\r\n flag[i]=1\r\n continue\r\n elif istriangle(data_o[i],data_o[l],data_o[k],data_o[j]):\r\n flag[j]=1\r\n continue\r\n elif istriangle(data_o[i],data_o[j],data_o[l],data_o[k]):\r\n flag[k]=1\r\n for i in range(size):\r\n if flag[i]==0:\r\n data.append(data_o[i])\r\n data=sorted(data,key = lambda point: (point.x,point.y))\r\n\r\n A=data[0]\r\n B=data[-1]\r\n del data[0]\r\n del data[-1]\r\n for P in data:\r\n if(d(A,B,P)>0):\r\n L.append(P)\r\n elif(d(A,B,P)<0):\r\n R.append(P)\r\n Lr=L.reverse()\r\n \r\n # '''\r\n for p in data_o:\r\n plt.scatter(p.x, p.y, c='g', marker='.')\r\n\r\n plt.scatter(A.x, A.y, c='r', marker='.')\r\n plt.plot([A.x,R[0].x],[A.y,R[0].y], color='r')\r\n for i in range(len(R)-1):\r\n plt.scatter(R[i].x, R[i].y, c='r', marker='.')\r\n plt.plot([R[i].x,R[i+1].x],[R[i].y,R[i+1].y], color='r')\r\n plt.scatter(R[-1].x, R[-1].y, c='r', marker='.')\r\n plt.plot([R[-1].x,B.x],[R[-1].y,B.y], color='r')\r\n plt.scatter(B.x, B.y, c='r', marker='.')\r\n 
plt.plot([B.x,L[0].x],[B.y,L[0].y], color='r')\r\n for i in range(len(L)-1):\r\n plt.scatter(L[i].x, L[i].y, c='r', marker='.')\r\n plt.plot([L[i].x,L[i+1].x],[L[i].y,L[i+1].y], color='r')\r\n plt.scatter(L[-1].x, L[-1].y, c='r', marker='.')\r\n plt.plot([L[-1].x,A.x],[L[-1].y,A.y], color='r')\r\n plt.show()\r\n # '''\r\n \r\n\r\n \r\n","sub_path":"lab1/BruteForceCH.py","file_name":"BruteForceCH.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"129593914","text":"import json\nfrom django.http import HttpResponseRedirect\nimport os\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom pymysql import connect\nfrom sqlalchemy import create_engine\nimport sqlalchemy as sa\nimport pandas as pd\nimport numpy as np\nimport six\nimport codecs\nfrom .charts import *\ntry:\n from io import BytesIO as IO # for modern python\nexcept ImportError:\n from io import StringIO as IO # for legacy python\nimport datetime\nimport xlsxwriter\nimport io\nfrom django.contrib.auth.decorators import login_required\nENGINE = create_engine('mysql+pymysql://root:lwydecd+20@localhost:3306/test') #创建数据库连接引擎\n# 根据用户传进来的数据创建的表\nDB_TABLE=''\ncolumn=''\nindex=''\nvalue=''\naggfunc=''\nuploadfilename=''\n\n# 此函数根据用户导入的文件来动态初始化界面\n# 1.获取用户导入的文件\n# 2.将文件以utf-8的编码形式写入新文件\n# 3.以新文件来生成��应的mysql数据库,并将相关数据传至前端,初始化界面\n@login_required\ndef index(request):\n if request.method == 'GET':\n return render(request, 'chpa_data/display.html')\n elif request.method == 'POST':\n content =request.FILES.get(\"upload\", None)\n if not content:\n return render(request, 'chpa_data/display.html', {'message': '没有上传内容','metadata':'请上传文件'})\n position = os.path.join('./upload',content.name)\n global uploadfilename\n uploadfilename=content.name\n newfile=position[0:position.rfind('.')]+'toUTF-8.csv'\n #获取上传文件的文件名,并将其存储到指定位置\n # wb+:以二进制格式打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。\n # 如果该文件不存在,创建新文件。一般用于非文本文件如图片等。\n storage = open(position,'wb+') #打开存储文件\n for chunk in content.chunks(): #分块写入文件\n storage.write(chunk)\n storage.close()\n #rb+:以二进制格式打开一个文件用于读写。文件指针将会放在文件的开头。一般用于非文本文件如图片等。\n f=open(position,'rb+')\n content_type=f.read()#读取文件内容,content_type为bytes类型,而非string类型\n\n # 获取文件编码方式\n source_encoding=get_file_code(content_type)\n\n # r:以只读方式打开文件。文件的指针将会放在文件的开头。这是默认模式。\n with codecs.open(position, \"r\",source_encoding) as f:\n newcontent=f.read()\n # wb:以二进制格式打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。\n # 如果该文件不存在,创建新文件。一般用于非文本文件如图片等。\n with codecs.open(newfile, \"wb\") as f:\n f.write(newcontent.encode(encoding='utf-8', errors=\"ignore\"))\n file_path = newfile\n\n global DB_TABLE\n # 根据用户导入的文件名选择最后一个X.csv的X为表名,并加上``符号\n DB_TABLE = '`'+os.path.split(file_path)[-1].split('.')[0] + '`'\n hostname = '127.0.0.1'\n port = 3306\n user = 'root'\n passwd = 'lwydecd+20'\n db = 'test'\n\n M = CsvToMysql(hostname=hostname, port=port, user=user, passwd=passwd, db=db)\n metadata=M.read_csv(file_path)\n if metadata=='null':\n return render(request, 'chpa_data/display.html', {'message': uploadfilename+'为空文件!','metadata':'您上传的文件为空文件,请重新上传'})\n sql='select column_name,data_type from information_schema.columns where table_name={} '.format(DB_TABLE.replace('`',\"'\"))\n # 获取表的字段名及类型\n df=pd.read_sql_query(sql,ENGINE)\n print(\"获取表的字段及其数据类型:\")\n print(df)\n\n # 初始化前端选项\n mselect_dict,mselect_dict_value=init_html_form(df)\n\n # 初始化方法选择框\n aggfunc_select={\n '求和':'sum',\n '统计个数':'count',\n '求平均值':'mean',\n '求标准差':'std',\n '求方差':'var',\n 
'求中位数':'median'\n }\n context = {\n 'mselect_dict':mselect_dict, #index,column\n 'mselect_dict_value':mselect_dict_value, #value\n 'message': uploadfilename+'已上传',\n 'aggfunc_select':aggfunc_select, #运算函数\n 'metadata':metadata #元数据信息或者提示空文件\n }\n return render(request, 'chpa_data/display.html',context) #返回客户端信息\n else:\n return HttpResponseRedirect(\"不支持的请求方法\")\n# 此函数删除字符串两边的反引号``\ndef delesig(str):\n if str[0]=='`':\n str=str[1:][:-1]\n else:\n str=str\n return str\n# 此函数根据form_dict数据做对应的处理(处理为原数据或者透视数据表)\ndef get_df(form_dict, is_pivoted=True):\n sql = sqlparse(form_dict) # sql拼接\n get_originData_sql='select * from %s' %(DB_TABLE)\n df = pd.read_sql_query(sa.text(sql), ENGINE) # 将sql语句结果读取至Pandas Dataframe\n originData_df=pd.read_sql_query(sa.text(get_originData_sql), ENGINE)\n print(\"构造出来的sql语句为\"+sql)\n\n # 前端维度的选择用在透视函数的参数,数据筛选的选择用于生成df\n if is_pivoted is True:\n dimension_selected = form_dict['DIMENSION_select'][0]\n index_selected = form_dict['INDEX_select'][0]\n value_selected = form_dict['VALUE_select'][0]\n aggfunc_selected=form_dict['AGGFUNC_select'][0]\n global column\n column=delesig(dimension_selected)\n global index\n index=delesig(index_selected)\n global value\n value=delesig(value_selected)\n global aggfunc\n aggfunc=delesig(aggfunc_selected)\n # 如果df里面没有数据,那么就不能使用透视函数\n if df.empty:\n #返回一个空的DataFrame\n return pd.DataFrame()\n pivoted = pd.pivot_table(df,\n values=value,\n index=index,\n columns=column,\n aggfunc=aggfunc,\n fill_value=0)\n # 如果透视出来的dataframe为空,那么也返回一个空的dataframe\n if pivoted.empty:\n return pd.DataFrame()\n return pivoted\n else:\n return originData_df\n@login_required\n# query函数在前端选择了筛选条件之后通过前端传递过来的值进行分析,并返回json格式的结果\n# 1.解析前端参数到理想格式\n# 2.根据前端参数数据拼接SQL并用Pandas读取\n# 3.Pandas读取数据后,将前端选择的DIMENSION作为pivot_table方法的column参数\n# 4.返回Json格式的结果\n# 注:前三步交给get_df函数做了\ndef query(request):\n # six库主要是为了兼容python2和python3\n # 调用Python 2中的dictionary.iterlists() 或Python 3中的dictionary.lists()\n form_dict = dict(six.iterlists(request.GET))\n print(\"前端表单转换为字典:\")\n print(form_dict)\n pivoted = get_df(form_dict)\n df=get_df(form_dict,is_pivoted=False)\n # KPI\n # kpi = get_kpi(pivoted)\n\n # table = ptable(pivoted)\n # 透视表格\n table = pivoted.to_html(formatters=build_formatters_by_col(pivoted), # 逐列调整表格内数字格式\n classes='ui selectable striped nowrap celled table', # 指定表格css class为Semantic UI主题\n table_id='ptable' # 指定表格id\n )\n # 原数据表格\n inittable = df.to_html(#formatters=build_formatters_by_col(df), # 逐列调整表格内数字格式\n classes='ui selectable striped nowrap celled table ', # 指定表格css class为Semantic UI主题\n table_id='initdata_table' # 指定表格id\n )\n #describe和valuecounts函数转为图表\n info_chart=json.loads(prepare_chart(df, 'get_info_chart', index,column,aggfunc,value))\n # 原数据图\n origin_data_chart=json.loads(prepare_chart(df,'creat_origindata_chart',index,column,aggfunc,value))\n # 3d透视图\n pivot_chart = json.loads(prepare_chart(pivoted, 'get_pivot_chart',index,column,aggfunc,value))\n context = {\n 'ptable':table,\n \"initdata_table\":inittable,\n 'info_chart':info_chart,\n 'pivot_chart': pivot_chart,\n 'origin_data_chart':origin_data_chart\n }\n\n return HttpResponse(json.dumps(context, ensure_ascii=False), content_type=\"application/json charset=utf-8\") # 返回结果必须是json格式\ndef build_formatters_by_col(df):\n # 整数位添加千位分隔符,保留两位小数\n format_data=lambda x: '{:,.0f}'.format(x)\n d = {}\n for column in df.columns:\n d[column]=format_data\n return d\n# 下面是一个获得各个字段option_list的简单方法,在页面初始化时从后端提取所有字段的不重复值作为选项传入前端。\ndef get_distinct_list(column, db_table):\n sql = \"Select DISTINCT \" + column + \" 
From \" + db_table\n df = pd.read_sql_query(sql, ENGINE)\n l = df.values.flatten().tolist()\n return l\n# 构造sql语句\ndef sqlparse(context):\n sql = \"Select * from %s Where true \" % (DB_TABLE) # 构造sql语句前半段\n\n # 下面循环处理多选部分(即数据筛选部分)\n for k, v in context.items():\n # CSRF(Cross-site request forgery),中文名称:跨站请求伪造。CSRF攻击:攻击者盗用了你的身份,以你的名义发送恶意请求。\n if k not in ['csrfmiddlewaretoken', 'DIMENSION_select', 'VALUE_select', 'INDEX_select','AGGFUNC_select']:\n if k[-2:] == '[]':\n field_name = k[:-9] # 如果键以[]结尾,删除_select[]取原字段名\n else:\n field_name = k[:-7] # 如果键不以[]结尾,删除_select取原字段名\n selected = v # 选择项\n sql = sql_extent(sql, field_name, selected) #未来可以通过进一步拼接字符串动态扩展sql语句\n return sql\n\n# 通过AND关键字连接来扩展sql语句\ndef sql_extent(sql, field_name, selected, operator=\" AND \"):\n if selected is not None:\n statement = ''\n for data in selected:\n statement = statement + \"'\" + data + \"', \"\n statement = statement[:-2]\n if statement != '':\n sql = sql + operator + field_name + \" in (\" + statement + \")\"\n return sql\n\n# 可视化数据,渲染图表\ndef prepare_chart(df, # 输入经过pivoted方法透视过的df,不是原始df\n chart_type, # 图表类型字符串,人为设置,根据图表类型不同做不同的Pandas数据处理,及生���不同的Pyechart对象\n index, # 前端表单字典,用来获得一些变量作为图表的标签如单位\n column,\n agg,\n value):\n if chart_type=='get_info_chart':#渲染df.describe的出来的表格\n chart=creat_info_chart(df,index,column)\n return chart.dump_options() # 用json格式返回Pyecharts图表对象的全局设置\n elif chart_type=='get_pivot_chart':#透视图表\n chart=creat_pivot_chart(df,index,column,agg,value)\n return chart.dump_options()\n elif chart_type=='creat_origindata_chart':#原数据图表\n chart=creat_origindata_chart(df)\n return chart.dump_options()\n else:\n return None\n@login_required\n# 导出数据函数\ndef export(request, type):\n form_dict = dict(six.iterlists(request.GET))\n if type == 'pivoted':\n df = get_df(form_dict) # 透视后的数据\n sheet_name=aggfunc+'('+value+')'\n elif type == 'raw':\n df = get_df(form_dict, is_pivoted=False) # 原始数\n sheet_name='原始数据'\n excel_file = IO()\n xlwriter = pd.ExcelWriter(excel_file)\n df.to_excel(xlwriter, sheet_name=sheet_name)\n\n xlwriter.save()\n xlwriter.close()\n\n #重新设置起始位置,在这里等同于excel_file.close()\n excel_file.seek(0)\n\n # 设置浏览器mime类型\n # MIME (Multipurpose Internet Mail Extensions) 是描述消息内容类型的因特网标准。\n # MIME 消息能包含文本、图像、音频、视频以及其他应用程序专用的数据。\n response = HttpResponse(excel_file.read(),\n content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')#即为xlsx类型\n\n # 设置文件名\n now = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\") # 当前精确时间不会重复,适合用来命名默认导出文件\n response['Content-Disposition'] = 'attachment; filename=' + now + '.xlsx'\n return response\n\n# 读取用户导入的csv文件并将其存入mysql数据库(数据清洗也在这里)\nclass CsvToMysql(object):\n def __init__(self, hostname, port, user, passwd, db):\n self.dbname = db\n self.conn = connect(host=hostname, port=port, user=user, passwd=passwd, db=db)\n self.cursor = self.conn.cursor()\n\n # 读取csv文件\n def read_csv(self,filename):\n # 判断是否为空文件\n size = os.path.getsize(filename)\n if size == 0:\n return 'null'\n # csv文件中的字段可能会有空,在读取的时候会变成nan,nan到了mysql中是没有办法处理的就会报错,\n # 所以需要加上这个keep_default_na=False,设为false后就会保留原空字符,就不会变成nan了\n df = pd.read_csv(filename, keep_default_na=False, encoding='utf-8')\n if 'Unnamed: 0' in df.columns:\n df.drop('Unnamed: 0',axis=1,inplace=True) #改变原始数据\n table_name = '`'+os.path.split(filename)[-1].split('.')[0] + '`'\n # print(\"下列语句测试构造出来的表名是否正确\")\n # print(os.path.split(filename))\n # print(os.path.split(filename)[-1])\n # print(os.path.split(filename)[-1].split('.'))\n # print(os.path.split(filename)[-1].split('.')[0])\n 
self.csv2mysql(db_name=self.dbname,table_name=table_name, df=df )\n        buffer = io.StringIO()\n        df.info(buf=buffer,memory_usage='deep')\n        s =buffer.getvalue()#获取到数据的元数据\n        ss=\"您导入的数据的字段信息如下:\\n\"+s[s.rfind('Range'):]\n        return ss\n    # pandas的数据类型和MySQL是不通用的,需要进行类型转换。字段名可能含有非法字符,需要反引号。\n    def make_table_sql(self,df):\n        #将csv中的字段类型转换成mysql中的字段类型\n        columns = df.columns.tolist()\n        make_table = []\n        make_field = []\n        for col in columns:\n            item1 = '`'+col+'`'\n            if 'int' in str(df[col].dtype):\n                char = item1 + ' FLOAT'\n            elif 'float' in str(df[col].dtype):\n                char = item1 + ' FLOAT'\n            elif 'object' in str(df[col].dtype):\n                char = item1 + ' VARCHAR(255)'\n            elif 'datetime' in str(df[col].dtype):\n                char = item1 + ' DATETIME'\n            else:\n                char = item1 + ' VARCHAR(255)'\n            make_table.append(char)\n            make_field.append(item1)\n        return ','.join(make_table), ','.join(make_field)\n\n\n    def csv2mysql(self,db_name,table_name,df):\n        print(\"开始构造表格:\")\n        field1, field2 = self.make_table_sql(df)\n        print(\"create table {} ( {})\".format(table_name,field1))\n        self.cursor.execute('drop table if exists {}'.format(table_name))\n        self.cursor.execute(\"create table {} ({})\".format(table_name, field1))\n        values = df.values.tolist()\n        s = ','.join(['%s' for _ in range(len(df.columns))])\n        try:\n            print(len(values[0]),len(s.split(',')))\n            print ('insert into {}({}) values ({})'.format(table_name, field2, s), values[0])\n            self.cursor.executemany('insert into {}({}) values ({})'.format(table_name, field2, s), values)\n        except Exception as e:\n            # Python 3 exceptions have no .message attribute; print the exception itself\n            print(e)\n        finally:\n            self.conn.commit()\n\n\n# 在python中,Unicode类型是作为编码的基础类型\n# decode encode\n# str ---------> str(Unicode) ---------> str\n# 得到文件的编码方式,方便读取文件时选择对应的编码方式\ndef get_file_code(content_type):\n    try:\n        content_type.decode('utf-8').encode('utf-8')\n        source_encoding='utf-8'\n    except:\n        try:\n            content_type.decode('gbk').encode('utf-8')\n            source_encoding='gbk'\n        except:\n            try:\n                content_type.decode('gb2312').encode('utf-8')\n                source_encoding='gb2312'\n            except:\n                try:\n                    content_type.decode('gb18030').encode('utf-8')\n                    source_encoding='gb18030'\n                except:\n                    try:\n                        content_type.decode('big5').encode('utf-8')\n                        source_encoding='big5'\n                    except:\n                        content_type.decode('cp936').encode('utf-8')\n                        source_encoding='cp936'\n    return source_encoding\n\n# 初始化前端表单\n\ndef init_html_form(df):\n    D_screen_condition=dict(zip(df['COLUMN_NAME'],'`'+df['COLUMN_NAME']+'`'))\n    #传给前端value选择的备选值,因为value只能选择数值类型的数据,选择字符数据没有意义\n    df2=df[df['DATA_TYPE'].isin(['int','float'])]\n    D_screen_condition2VALUE=dict(zip(df2['COLUMN_NAME'],'`'+df2['COLUMN_NAME']+'`'))\n    print(\"index/column备选项为:\")\n    print(D_screen_condition)\n    print(\"value备选项为:\")\n    print(D_screen_condition2VALUE)\n    # 下面的代码负责初始化表单选项(index和column)\n    mselect_dict = {}\n    for key, value in D_screen_condition.items():\n        #mselect_dict的key为D_screen_condition的key\n        # mselect_dict的value为字典,具有select和options两个字段\n        mselect_dict[key] = {}\n\n        # value的select字段表示选择了数据库中的哪个属性\n        mselect_dict[key]['select'] = value\n\n        # value的options字段表示数据库中该属性具有的各不相同的取值\n        option_list=get_distinct_list(value,DB_TABLE)\n        mselect_dict[key]['options'] = option_list #以后可以后端通过列表为每个多选控件传递备选项\n    # 下面单独初始化value备选框\n    mselect_dict_value={}\n    for key, value in D_screen_condition2VALUE.items():\n        #D_MULTI_SELECT\n        #mselect_dict_value的key为D_screen_condition2VALUE的key\n        # mselect_dict_value的value为字典,具有select和options两个字段\n        mselect_dict_value[key] = {}\n\n        # value的select字段表示选择了数据库中的哪个属性\n        mselect_dict_value[key]['select'] = value\n\n        # value的options字段表示数据库中该属性具有的各不相同的取值\n        
option_list=get_distinct_list(value,DB_TABLE)\n mselect_dict_value[key]['options'] = option_list #以后可以后端通过列表为每个多选控件传递备选项\n return mselect_dict,mselect_dict_value","sub_path":"chpa_data/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"59330806","text":"# -*- coding:utf-8 -*-\nfrom sqlalchemy.ext.declarative import declarative_base # 模型繼承父類\nfrom sqlalchemy.dialects.mysql import BIGINT, VARCHAR, TINYINT, DATE, DATETIME, TEXT # 導入數據類型\nfrom sqlalchemy import Column # 指定字段\n\nBase = declarative_base()\n\n\"\"\"\nid\t\t\t編號\t\tBigInt \t\tprimary-key\nname\t\t姓名\t\tstr\t\t\tno null\njob\t\t\t職位\t\tTinyInt\t\tno null\nsex\t\t\t性別\t\tTinyInt\t\tno null\nedu\t\t\t學歷\t\tTinyInt\t\tno null\nbirth\t\t生日\t\ttime\t\tno null\nemail\t\t郵箱\t\tstr\t\t\tno null-only\nphone\t\t手機\t\tstr\t\t\tno null-only\ninfo\t\t介紹\t\ttext\t\tno null\nface\t\t頭像\t\tstr\t\t\tno null\ncreatedAt\t添加時間\tsteap time\tno null\nupdateAt\t添加時間\tsteap time\tno null\n\"\"\"\n\nclass Employee(Base):\n __tablename__ = \"employee\"\n id = Column(BIGINT, primary_key=True)\n name = Column(VARCHAR(255), nullable=False)\n job = Column(TINYINT, nullable=False)\n sex = Column(TINYINT, nullable=False)\n edu = Column(TINYINT, nullable=False)\n birth = Column(DATE, nullable=False)\n email = Column(VARCHAR(100), nullable=False, unique=True)\n phone = Column(VARCHAR(17), nullable=False, unique=True)\n info = Column(TEXT, nullable=False)\n face = Column(VARCHAR(100), nullable=False)\n createdAt = Column(DATETIME, nullable=False)\n updatedAt = Column(DATETIME, nullable=False)\n\n\n\n\"\"\"\nid\t\t\t編號\t\tBigInt \t\tprimery-key\nemployee_id\t員工編號\tBigInt\t\tno null\nhobby_key\t愛好索引\tTintInt\t\tno null\ncreatedAt\t添加時間\tsteap time\tno null\nupdateAt\t添加時間\tsteap time\tno null\n\"\"\"\nclass Hobby(Base):\n __tablename__ = \"hobby\"\n id = Column(BIGINT, primary_key=True)\n employee_id = Column(BIGINT, nullable=False)\n hobby_key = Column(TINYINT, nullable=False)\n createdAt = Column(DATETIME, nullable=False)\n updatedAt = Column(DATETIME, nullable=False)\n\n\n\"\"\"\n大賽數據模型\nid 編號 BigInt primary_key\nname 大賽名稱 str no null\nemail 郵箱 str no null-only\ncreatedAt 添加時間 steap time no null\nupdateAt 添加時間 steap time no null\n\"\"\"\nclass Game(Base):\n __tablename__ = \"game\"\n id = Column(BIGINT, primary_key=True)\n name = Column(VARCHAR(255), nullable=False)\n email = Column(VARCHAR(100), nullable=False, unique=True)\n createdAt = Column(DATETIME, nullable=False)\n updatedAt = Column(DATETIME, nullable=False)\n\n\n\n\"\"\"\n比賽數據模型\nid 編號 BigInt primary_key\ngame_id 大賽編號 BigInt no null\nname 比賽名稱 str no null\nurl 比賽網址 str no null\nimg 比賽圖片 str no null\ncreatedAt 添加時間 steap time no null\nupdateAt 添加時間 steap time no null\n\"\"\"\nclass Subgame(Base):\n __tablename__ = \"subgame\"\n id = Column(BIGINT, primary_key=True)\n game_id = Column(BIGINT, nullable=False)\n name = Column(VARCHAR(255), nullable=False)\n url = Column(VARCHAR(255), nullable=False)\n img = Column(VARCHAR(100), nullable=False)\n createdAt = Column(DATETIME, nullable=False)\n updatedAt = Column(DATETIME, nullable=False)\n\n\n \n\"\"\"\n公告數據模型\nid 編號 BigInt primary_key\ngame_id 大賽編號 BigInt no null\ntitle 公告標題 str no null\ncontent 公告內容 str no null\ncreatedAt 添加時間 steap time no null\nupdateAt 添加時間 steap time no null\n\"\"\"\nclass Message(Base):\n __tablename__ = \"message\"\n id = Column(BIGINT, primary_key=True)\n game_id = Column(BIGINT, nullable=False)\n title = 
Column(VARCHAR(255), nullable=False)\n    content = Column(VARCHAR(255), nullable=False)\n    createdAt = Column(DATETIME, nullable=False)\n    updatedAt = Column(DATETIME, nullable=False)\n\n\n \n\"\"\"\n菜單數據模型\nid 編號 BigInt primary_key\ngame_id 大賽編號 BigInt no null\nitem 選項 str no null\nurl 選項連結 str no null\ncreatedAt 添加時間 steap time no null\n\"\"\"\nclass Menu(Base):\n    __tablename__ = \"menu\"\n    id = Column(BIGINT, primary_key=True)\n    game_id = Column(BIGINT, nullable=False)\n    item = Column(VARCHAR(255), nullable=False)\n    url = Column(VARCHAR(255), nullable=False)\n    createdAt = Column(DATETIME, nullable=False)\n\n\n\n\nif __name__ == \"__main__\":\n    import mysql.connector # 導入數據庫連接驅動\n    from sqlalchemy import create_engine # 導入創建引擎工具\n\n    mysql_configs = dict(\n        db_host=\"127.0.0.1\",\n        db_name=\"crud\",\n        db_port=3306,\n        db_user=\"root\",\n        db_pwd=\"q05qPO0nPeZrQ7J4\"\n    )\n\n    engine = create_engine(\n        'mysql+mysqlconnector://{db_user}:{db_pwd}@{db_host}:{db_port}/{db_name}?charset=utf8mb4'.format(\n            **mysql_configs),\n        encoding=\"utf-8\",\n        echo=True\n    )\n\n    metadata = Base.metadata\n    metadata.create_all(engine)\n    print(\"生成成功!\")\n","sub_path":"test_flask/app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"551224634","text":"import os\nimport logging\nimport subprocess\nimport tempfile\n\nfrom profilehooks import timecall\n\nfrom audfprint.audfprint_match import Matcher\nfrom audfprint.audfprint_analyze import Analyzer\n\nLOG = logging.getLogger(__file__)\n\n\ndef to_time(sec):\n    m, s = divmod(sec, 60)\n    return '%02d:%02d' % (m, s)\n\n\ndef analyzer():\n\n    a = Analyzer()\n    a.n_fft = 512\n    a.n_hop = a.n_fft / 2\n    a.shifts = 4\n    a.fail_on_error = False\n    a.density = 20\n    return a\n\n\ndef matcher():\n    m = Matcher()\n    m.find_time_range = True\n    m.search_depth = 200\n    m.verbose = True\n    return m\n\n\n@timecall(immediate=True)\ndef get_offset_end(vid, hashtable):\n    an = analyzer()\n    match = matcher()\n\n    start_time = -1\n    end_time = -1\n\n    t_hop = an.n_hop / float(an.target_sr)\n    rslts, dur, nhash = match.match_file(an, hashtable, vid, 1)  # The number does not matter...\n\n    for (tophitid, nhashaligned, aligntime,\n         nhashraw, rank, min_time, max_time) in rslts:\n        #print(tophitid, nhashaligned, aligntime, nhashraw, rank, min_time, max_time)\n        end_time = max_time * t_hop\n        start_time = min_time * t_hop\n        LOG.debug('Started at %s (%s) in ended at %s (%s)' % (start_time, to_time(start_time),\n                                                              end_time, to_time(end_time)))\n        return start_time, end_time\n\n    LOG.debug('no result just returning -1')\n\n    return start_time, end_time\n\n\ndef partial_dl(part, fn, stop=6, chunk=8000):\n    # this should check the parts location first imo.\n    # so we read directly from file.\n    stop = part.size // stop\n    p = os.path.join(os.getcwd(), '%s.mkv' % fn)\n\n    try:\n        N = 0\n        # read the source video in binary mode; text mode would return str and\n        # break the binary write below\n        with open(part.location, 'rb') as ip:\n            with open(p, 'wb') as outp:\n                while True:\n                    data = ip.read(chunk)\n                    N += chunk\n                    if stop and N > stop:\n                        break\n                    else:\n                        outp.write(data)\n\n        return p\n    except Exception as e:\n        #logging.exception(e)\n        print('local copy failed.')\n\n    print('copy via plex')\n    session = part._session\n\n    url = part._server.url('%s?download=1' % part.key)\n\n    r = session.get(url, stream=True)\n\n    with open(p, 'wb') as handle:\n        sofa = 0\n        for it in r.iter_content(chunk):\n            print(sofa)\n            if stop and sofa > stop:\n                break\n            handle.write(it)\n            sofa += chunk\n\n    return p\n\n\ndef in_dir(root, ratingkey):\n    for f 
in os.listdir(root):\n if ratingkey in f:\n fp = os.path.join(root, f)\n return fp\n\n\n@timecall(immediate=True)\ndef convert_and_trim(afile, fs=8000, trim=None):\n tmp = tempfile.NamedTemporaryFile(\n mode='r+b', prefix='offset_', suffix='.wav')\n tmp_name = tmp.name\n tmp.close()\n if trim is None:\n cmd = [\n 'ffmpeg', '-loglevel', 'panic', '-i', afile, '-ac', '1', '-ar',\n str(fs), '-acodec', 'pcm_s16le', tmp_name\n ]\n else:\n cmd = [\n 'ffmpeg', '-loglevel', 'panic', '-i', afile, '-ac', '1', '-ar',\n str(fs), '-ss', '0', '-t', str(trim), '-acodec', 'pcm_s16le',\n tmp_name\n ]\n\n LOG.debug('calling ffmepg with %s' % ' '.join(cmd))\n\n psox = subprocess.Popen(cmd, stderr=subprocess.PIPE)\n o, e = psox.communicate()\n if not psox.returncode == 0:\n print(e)\n raise Exception(\"FFMpeg failed\")\n\n return tmp_name\n\n\n#@timecall(immediate=True)\ndef convert_and_trim_to_mp3(afile, fs=8000, trim=None, outfile=None):\n if outfile is None:\n tmp = tempfile.NamedTemporaryFile(mode='r+b', prefix='offset_', suffix='.mp3')\n tmp_name = tmp.name\n tmp.close()\n outfile = tmp_name\n\n cmd = ['ffmpeg', '-i', afile, '-ss', '0', '-t', str(trim), '-codec:a', 'libmp3lame', '-qscale:a', '6', outfile]\n\n print('calling ffmepg with %s' % ' '.join(cmd))\n\n psox = subprocess.Popen(cmd, stderr=subprocess.PIPE)\n o,e = psox.communicate()\n if not psox.returncode == 0:\n print(e)\n raise Exception(\"FFMpeg failed\")\n\n return outfile\n\n\nif __name__ == '__main__':\n def zomg():\n print('zomg')\n ht = '' # path to db\n fp = '' # path to wav.\n from audfprint.hash_table import HashTable\n HT = HashTable(ht)\n n = get_offset_end(fp, HT)\n print(n)\n\n zomg()\n","sub_path":"misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"446366957","text":"from pathlib import Path\n\nlog_dir=\"C:\\\\airflowlogs\\\\marketvol\\\\\"\n\n\ndef parse_logs(log_fh):\n \"\"\"\n Function to parse error logs\n\n \"\"\"\n log_list = []\n for line in log_fh:\n # Filter lines starting with '[' \n if line.startswith(\"[\"):\n errorMessage=[]\n # Consider only error log message\n if line.split(\" \")[3]==\"ERROR\":\n log = line.split(\" \")[0]+\" \"+line.split(\" \")[1]+\" \"+\\\n line.split(\" \")[2]+\" \"+line.split(\" \")[3]\n\n # capture error message\n for i in range(5,len(line.split(\" \"))):\n errorMessage.append(line.split(\" \")[i])\n err_log=\" \".join(errorMessage)\n \n # append error message with rest of log message\n # For ex. 
[2021-03-04 01:40:18,644] {{models.py:1760}} ERROR - Bash command failed\n parsed_log=log+\" - \"+err_log\n log_list.append(parsed_log)\n\n return log_list\n\n\n# get all the logs recursively under the log folder\nfile_list = Path(log_dir).rglob('*.log')\n\n# capture the error log metrics for the airflow execution log files\nfor file in file_list:\n with open(file) as f:\n dict1=parse_logs(f)\n print(\"Error metrics for task '\"+str(file).split(\"\\\\\")[3]+\"' on date: \"+str(file).split(\"\\\\\")[4])\n print(\"The total number of errors :\" +str(len(dict1)))\n \n if len(dict1) > 0: \n print(\"Here are all the errors: \")\n for i in range(len(dict1)):\n print(dict1[i])\n print(\"----------------------------------------------------------------------------------------\")\n","sub_path":"airflow-mini-project-2/log_analyzer.py","file_name":"log_analyzer.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"294109564","text":"# 天猫和京东各种的上胸围胸罩销售比例\nfrom pandas import *\nfrom matplotlib.pyplot import *\nimport sqlite3\nimport sqlalchemy\nengine = sqlalchemy.create_engine('sqlite:///bra.sqlite')\nrcParams['font.sans-serif'] = ['SimHei']\noptions.display.float_format = '{:,.2f}%'.format\n\nsales = read_sql('select source,size2 from t_sales',engine)\n# DataFrame = table view\n# sales['source'] = Series\ntmallSize2GroupCount = sales[sales['source'] == '天猫'].groupby('size2')['size2'].count()\ntmallSize2Total = tmallSize2GroupCount.sum()\nprint(tmallSize2Total)\n# 将Series转换为DataFrame\ntmallSize2 = tmallSize2GroupCount.to_frame(name='销量')\n\ntmallSize2.insert(0,'比例',100 * tmallSize2GroupCount / tmallSize2Total)\ntmallSize2.index.names=['罩杯']\nprint(tmallSize2)\n\n# 京东\njdSize2GroupCount = sales[sales['source'] == '京东'].groupby('size2')['size2'].count()\njdSize2Total = jdSize2GroupCount.sum()\nprint(jdSize2GroupCount)\n# 将Series转换为DataFrame\njdSize2 = jdSize2GroupCount.to_frame(name='销量')\n\njdSize2.insert(0,'比例',100 * jdSize2GroupCount / jdSize2Total)\njdSize2.index.names=['罩杯']\nprint(jdSize2)\n\n#labels1 = ['A罩杯','B罩杯','C罩杯']\n#labels2 = ['A罩杯','B罩杯','C罩杯','D罩杯']\nlabels1 = []\nlabels2 = []\nlabels1 = tmallSize2.index.tolist()\nlabels2= jdSize2.index.tolist()\n\n \nfig,(ax1,ax2) = subplots(1,2,figsize=(12,6))\n'''\nax1.pie(tmallSize2['销量'],labels = labels1, autopct='%.2f%%')\nax2.pie(jdSize2['销量'],labels = labels2, autopct='%.2f%%')\nax1.legend()\nax2.legend()\nax1.set_title('天猫上胸围比例')\nax2.set_title('京东上胸围比例')\nax1.axis('equal')\nax2.axis('equal')\n'''\nintLabels = []\nfor label in labels1:\n intLabels.append(int(label))\nax1.bar(intLabels, tmallSize2['销量'])\nax2.pie(jdSize2['销量'],labels = labels2, autopct='%.2f%%')\n\nax2.legend()\nax1.set_title('天猫上胸围比例')\nax2.set_title('京东上胸围比例')\n\nax2.axis('equal')\nshow()\n","sub_path":"mybra/src/analyze/demo05.py","file_name":"demo05.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"235537642","text":"import xml.etree.cElementTree as ET\nimport requests\nimport urllib\nimport urllib.request\nimport pandas as pd\n\n# df = pd.DataFrame(columns=(\"Date\",\"Latitude\",\"Longitude\",\"DisasterType\",\"Intensity\",\"EventId\",\"Country\"))\n\ndef query_gdacs(xml_link):\n df = pd.DataFrame(columns=(\"Date\", \"Latitude\", \"Longitude\", \"DisasterType\", \"Intensity\", \"EventId\", \"Country\"))\n tree = ET.ElementTree(file=urllib.request.urlopen(xml_link))\n root = 
tree.getroot()\n for child in root.iter('item'):\n\n try:\n date_occur = child[6].text\n except Exception as e:\n date_occur = None\n # print(\"Date Occur\", date_occur)\n\n try:\n latitude = child[13].text.split(' ')[0]\n except Exception as e:\n latitude = None\n # print(\"Latitude: \", latitude)\n\n try:\n longitude = child[13].text.split(' ')[1]\n except Exception as e:\n longitude = None\n # print(\"Longitude: \", longitude)\n\n try:\n disastertype = child[16].text\n except Exception as e:\n disastertype = None\n # print(\"Disaster Type: \", disastertype)\n\n try:\n intensity = child[17].text\n except Exception as e:\n intensity = None\n # print(\"Intensity: \", intensity)\n\n try:\n eventId = child[22].text\n except Exception as e:\n eventId = None\n # print(\"Event Id: \", eventId)\n\n try:\n country = child[29].text\n except Exception as e:\n country = None\n # print(\"Country: \", country)\n # print('------------')\n df.loc[len(df)] = [date_occur,latitude,longitude,disastertype,intensity,eventId, country]\n return df","sub_path":"alert.py","file_name":"alert.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"331094294","text":"expressao = str(input('Insira a expressão : '))\npilha = []\nfor simbolo in expressao:\n if simbolo == '(':\n pilha.append('(')\n elif simbolo == ')':\n if len(pilha) > 0:\n pilha.pop()\n else:\n pilha.append(')')\n break\nif len(pilha) == 0:\n print('A expressão está correta')\nelse:\n print('A expressão está errada !!!')","sub_path":"Mundo3/Lista/Desafio#83.py","file_name":"Desafio#83.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"561167984","text":"import tensorflow as tf\n\nfrom utils.scheduler import Scheduler\nfrom utils.summarizer import Summarizer\n\n\nclass GlobalServer:\n def __init__(self, model, args):\n \"\"\"\n 全てのスレッドで共有するパラメータを管理するサーバー.\n :param model: モデル\n :param args: パラメータ群\n \"\"\"\n # global shared parameter vectors\n self.weights = model.model.trainable_weights\n\n # RMSPropの学習率\n self.scheduler = Scheduler(args.learn_rate, args.tmax)\n self.lr = tf.placeholder(tf.float32, [])\n\n # optimizer\n self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.lr,\n decay=args.decay,\n epsilon=0.1)\n\n # summarizer\n self.summarizer = Summarizer('../data/summaries/' + args.run_id)\n","sub_path":"src/a3c/global_server.py","file_name":"global_server.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"163634445","text":"import glob\nimport json\nimport os\nimport shutil\nimport operator\nimport sys\nimport argparse\nimport math\n\nimport numpy as np\n\n\nclass MAP_Calculator:\n\n def __init__(self, minoverlap=0.5, classes=None, noprint=False):\n\n self.MINOVERLAP = minoverlap\n self.classes = classes\n self.preds = []\n self.gts = []\n\n self.print = not noprint\n\n self.gt_counter_per_class = {}\n self.gt_counter_images_per_class = {}\n\n self.pair_count = 0\n\n self.GTS = []\n self.PRDS = dict()\n self.already_seen_classes_gt = []\n self.gt_classes = None\n self.n_classes = None\n\n self.predicteds = dict()\n\n def calculate(self):\n report = \"\"\n gt_classes = list(self.gt_counter_per_class.keys())\n self.gt_classes = sorted(gt_classes)\n self.n_classes = len(gt_classes)\n\n for class_name in self.gt_classes:\n if class_name in self.PRDS:\n 
self.PRDS[class_name].sort(key=lambda x:float(x['confidence']), reverse=True)\n\n sum_AP = 0.0\n ap_dictionary = {}\n lamr_dictionary = {}\n\n count_true_positives = {}\n\n for class_index, class_name in enumerate(self.gt_classes):\n count_true_positives[class_name] = 0\n\n if class_name in self.PRDS:\n dr_data = self.PRDS[class_name]\n else:\n dr_data = []\n\n nd = len(dr_data)\n tp = [0] * nd # creates an array of zeros of size nd\n fp = [0] * nd\n\n for idx, detection in enumerate(dr_data):\n file_id = detection[\"id\"]\n # assign detection-results to ground truth object if any\n # open ground-truth with that file_id\n # gt_file = TEMP_FILES_PATH + \"/\" + file_id + \"_ground_truth.json\"\n ground_truth_data = self.GTS[file_id]\n ovmax = -1\n gt_match = -1\n # load detected object bounding-box\n bb = detection[\"bbox\"]\n for obj in ground_truth_data:\n # look for a class_name match\n if obj[\"class_name\"] == class_name:\n bbgt = obj[\"bbox\"]\n # L U R D\n # 0 1 2 3\n bi = [max(bb[0], bbgt[0]), min(bb[1], bbgt[1]), min(bb[2], bbgt[2]), max(bb[3], bbgt[3])]\n iw = bi[2] - bi[0] + 1\n ih = bi[1] - bi[3] + 1\n if iw > 0 and ih > 0:\n # compute overlap (IoU) = area of intersection / area of union\n ua = (bb[2] - bb[0] + 1) * (bb[1] - bb[3] + 1) + \\\n (bbgt[2] - bbgt[0] + 1) * (bbgt[1] - bbgt[3] + 1) - iw * ih\n ov = iw * ih / ua\n if ov > ovmax:\n ovmax = ov\n gt_match = obj\n\n # assign detection as true positive/don't care/false positive\n # set minimum overlap\n min_overlap = self.MINOVERLAP\n # if specific_iou_flagged:\n # if class_name in specific_iou_classes:\n # index = specific_iou_classes.index(class_name)\n # min_overlap = float(iou_list[index])\n if ovmax >= min_overlap:\n if \"difficult\" not in gt_match:\n if not bool(gt_match[\"used\"]):\n # true positive\n tp[idx] = 1\n gt_match[\"used\"] = True\n count_true_positives[class_name] += 1\n else:\n # false positive (multiple detection)\n fp[idx] = 1\n else:\n # false positive\n fp[idx] = 1\n if ovmax > 0:\n status = \"INSUFFICIENT OVERLAP\"\n\n cumsum = 0\n for idx, val in enumerate(fp):\n fp[idx] += cumsum\n cumsum += val\n cumsum = 0\n for idx, val in enumerate(tp):\n tp[idx] += cumsum\n cumsum += val\n # print(tp)\n rec = tp[:]\n for idx, val in enumerate(tp):\n rec[idx] = float(tp[idx]) / self.gt_counter_per_class[class_name]\n # print(rec)\n prec = tp[:]\n for idx, val in enumerate(tp):\n prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])\n # print(prec)\n\n ap, mrec, mprec = voc_ap(rec[:], prec[:])\n sum_AP += ap\n text = \"{0:.2f}%\".format(ap * 100) + \" = \" + class_name + \" AP \" # class_name + \" AP = {0:.2f}%\".format(ap*100)\n \"\"\"\n Write to results.txt\n \"\"\"\n rounded_prec = ['%.2f' % elem for elem in prec]\n rounded_rec = ['%.2f' % elem for elem in rec]\n\n report += text + \"\\n Precision: \" + str(rounded_prec) + \"\\n Recall :\" + str(rounded_rec) + \"\\n\\n\\n\"\n if self.print:\n print(text + \"\\n Precision: \" + str(rounded_prec) + \"\\n Recall :\" + str(rounded_rec) + \"\\n\\n\")\n\n ap_dictionary[class_name] = ap\n\n n_images = self.gt_counter_images_per_class[class_name]\n lamr, mr, fppi = log_average_miss_rate(np.array(rec), np.array(fp), n_images)\n lamr_dictionary[class_name] = lamr\n\n mAP = sum_AP / len(self.classes)\n text = \"mAP = {0:.3f}%\".format(mAP * 100)\n report += text\n if self.print:\n print(text)\n return mAP, report\n\n def add_gt_pred_pair(self, gts, preds):\n # expects x, y, width, height, confidence, class index\n\n gt_collector = list()\n # gt_collector[\"bbox\"] = list()\n 
# gt_collector[\"class\"] = list()\n # gt_collector[\"used\"] = list()\n\n already_seen_classes = []\n\n for gt in gts:\n # is_difficult = False\n\n l, d, w, h = gt[0:4]\n r = l + w\n u = d + h\n\n class_index = round(float(gt[5]))\n\n if self.classes is not None:\n try:\n class_name = self.classes[class_index]\n except IndexError:\n print(class_index, self.classes)\n exit(-1)\n else:\n class_name = class_index\n\n if class_name in self.gt_counter_per_class:\n self.gt_counter_per_class[class_name] += 1\n else:\n self.gt_counter_per_class[class_name] = 1\n\n # gt_collector[\"bbox\"].append((l, u, r, d))\n # gt_collector[\"class\"].append(class_name)\n # gt_collector[\"used\"].append(False)\n\n gt_collector.append({\"class_name\":class_name, \"bbox\":(l, u, r, d), \"used\":False})\n\n if class_name not in already_seen_classes:\n if class_name in self.gt_counter_images_per_class:\n self.gt_counter_images_per_class[class_name] += 1\n else:\n # if class didn't exist yet\n self.gt_counter_images_per_class[class_name] = 1\n already_seen_classes.append(class_name)\n\n self.GTS.append(gt_collector)\n\n pred_collector = list()\n # pred_collector['confidence'] = []\n # pred_collector['bbox'] = []\n # pred_collector['id'] = []\n\n\n # TODO: optimize?\n for class_index, class_name in enumerate(self.classes):\n for pred in preds:\n l, d, w, h, confidence, pred_class_index = pred[0:6]\n pred_class_index = round(float(pred_class_index))\n r = l + w\n u = d + h\n\n if class_index == pred_class_index:\n # pred_collector['confidence'].append(confidence)\n # pred_collector['bbox'].append((l, u, r, d))\n # pred_collector['id'].append(self.pair_count)\n if not class_name in self.PRDS:\n self.PRDS[class_name] = []\n\n self.PRDS[class_name].append({\"confidence\": confidence, \"id\": self.pair_count, \"bbox\":(l, u, r, d)})\n\n # self.PRDS.append(pred_collector)\n self.pair_count += 1\n\n # TODO: sort\n\nbounding_boxes = []\n\n\ndef log_average_miss_rate(precision, fp_cumsum, num_images):\n \"\"\"\n log-average miss rate:\n Calculated by averaging miss rates at 9 evenly spaced FPPI points\n between 10e-2 and 10e0, in log-space.\n output:\n lamr | log-average miss rate\n mr | miss rate\n fppi | false positives per image\n references:\n [1] Dollar, Piotr, et al. 
\"Pedestrian Detection: An Evaluation of the\n State of the Art.\" Pattern Analysis and Machine Intelligence, IEEE\n Transactions on 34.4 (2012): 743 - 761.\n \"\"\"\n\n # if there were no detections of that class\n if precision.size == 0:\n lamr = 0\n mr = 1\n fppi = 0\n return lamr, mr, fppi\n\n fppi = fp_cumsum / float(num_images)\n mr = (1 - precision)\n\n fppi_tmp = np.insert(fppi, 0, -1.0)\n mr_tmp = np.insert(mr, 0, 1.0)\n\n # Use 9 evenly spaced reference points in log-space\n ref = np.logspace(-2.0, 0.0, num = 9)\n for i, ref_i in enumerate(ref):\n # np.where() will always find at least 1 index, since min(ref) = 0.01 and min(fppi_tmp) = -1.0\n j = np.where(fppi_tmp <= ref_i)[-1][-1]\n ref[i] = mr_tmp[j]\n\n # log(0) is undefined, so we use the np.maximum(1e-10, ref)\n lamr = math.exp(np.mean(np.log(np.maximum(1e-10, ref))))\n\n return lamr, mr, fppi\n\n\"\"\"\n check if the number is a float between 0.0 and 1.0\n\"\"\"\ndef is_float_between_0_and_1(value):\n try:\n val = float(value)\n if val > 0.0 and val < 1.0:\n return True\n else:\n return False\n except ValueError:\n return False\n\n\"\"\"\n Calculate the AP given the recall and precision array\n 1st) We compute a version of the measured precision/recall curve with\n precision monotonically decreasing\n 2nd) We compute the AP as the area under this curve by numerical integration.\n\"\"\"\ndef voc_ap(rec, prec):\n \"\"\"\n --- Official matlab code VOC2012---\n mrec=[0 ; rec ; 1];\n mpre=[0 ; prec ; 0];\n for i=numel(mpre)-1:-1:1\n mpre(i)=max(mpre(i),mpre(i+1));\n end\n i=find(mrec(2:end)~=mrec(1:end-1))+1;\n ap=sum((mrec(i)-mrec(i-1)).*mpre(i));\n \"\"\"\n rec.insert(0, 0.0) # insert 0.0 at begining of list\n rec.append(1.0) # insert 1.0 at end of list\n mrec = rec[:]\n prec.insert(0, 0.0) # insert 0.0 at begining of list\n prec.append(0.0) # insert 0.0 at end of list\n mpre = prec[:]\n \"\"\"\n This part makes the precision monotonically decreasing\n (goes from the end to the beginning)\n matlab: for i=numel(mpre)-1:-1:1\n mpre(i)=max(mpre(i),mpre(i+1));\n \"\"\"\n # matlab indexes start in 1 but python in 0, so I have to do:\n # range(start=(len(mpre) - 2), end=0, step=-1)\n # also the python function range excludes the end, resulting in:\n # range(start=(len(mpre) - 2), end=-1, step=-1)\n for i in range(len(mpre)-2, -1, -1):\n mpre[i] = max(mpre[i], mpre[i+1])\n \"\"\"\n This part creates a list of indexes where the recall changes\n matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1;\n \"\"\"\n i_list = []\n for i in range(1, len(mrec)):\n if mrec[i] != mrec[i-1]:\n i_list.append(i) # if it was matlab would be i + 1\n \"\"\"\n The Average Precision (AP) is the area under the curve\n (numerical integration)\n matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i));\n \"\"\"\n ap = 0.0\n for i in i_list:\n ap += ((mrec[i]-mrec[i-1])*mpre[i])\n return ap, mrec, mpre\n\n\n","sub_path":"map_calc.py","file_name":"map_calc.py","file_ext":"py","file_size_in_byte":12029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"131646685","text":"import adv_test\nimport adv\nfrom adv import *\n\ndef module():\n return G_Sarisse\n\nclass G_Sarisse(adv.Adv):\n def init(this):\n this.hits = 0\n this.buffs = adv.Buff()\n this.s2stance = 0\n\n def dmg_proc(this, name, amount):\n if name[:2] == 'x1':\n this.hits += 3\n elif name[:2] == 'x2':\n this.hits += 2\n elif name[:2] == 'x3':\n this.hits += 3\n elif name[:2] == 'x4':\n this.hits += 2\n elif name[:2] == 'x5':\n this.hits += 5\n elif name[:2] == 'fs':\n 
this.hits += 8\n        if this.hits >= 20:\n            this.hits -= 20\n            adv.Buff('sylvan strength',0.02,15*1.3,wide='self').on()\n            adv.Buff('sylvan crit',0.01,15*1.3,'crit','chance',wide='self').on()\n\n    def s1_proc(this, e):\n        buffcount = 0\n        for i in this.buffs._static.all_buffs:\n            if buffcount >= 7:\n                break\n            if i.get():\n                buffcount += 1\n        this.dmg_make('s1_missile*%d'%buffcount,0.77*buffcount)\n        this.hits += 1 + buffcount\n        \n\n    def s2_proc(this, e):\n        if this.s2stance == 0:\n            adv.Buff('s2str',0.20,13).on()\n            this.s2stance = 1\n        elif this.s2stance == 1:\n            log('buff','def')\n            this.s2stance = 0\n\n\nif __name__ == '__main__':\n    module().comment = 'c4+fs'\n    conf = {}\n    conf['acl'] = \"\"\"\n        `s1\n        `s2\n        `s3\n        `fs, seq=4\n        \"\"\"\n    adv_test.test(module(), conf, verbose=0)\n\n    module().comment = ''\n    conf = {}\n    conf['acl'] = \"\"\"\n        `s1\n        `s2\n        `s3\n        \"\"\"\n    adv_test.test(module(), conf, verbose=0)\n\n","sub_path":"adv/g_sarisse.py","file_name":"g_sarisse.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"352978513","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom common import core\nfrom application.routes import routeMap\nfrom application import configs\n\n\napp = core.Server(\n    debug=configs.DEBUG,\n    dbconfig=configs.DB\n)\n\n\nfor route in routeMap:\n    app.router.add_route(route[0], route[1], route[2])\n\n\nif __name__ == '__main__':\n    app.run()","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"143293480","text":"import thinkbayes2\nimport thinkplot\n\nclass Bulb(thinkbayes2.Suite):\n\n    def __init__(self, label):\n        #I assume that a light bulb is used 5 hours per day\n        #A light bulb live for ~1000 hours => 200 days\n        #I use the smallest increasing failure rate (1)\n        self.nb_bulb = 100\n        self.label = label\n        pmf = thinkbayes2.MakeWeibullPmf(200, 1, self.nb_bulb, self.nb_bulb)\n        thinkbayes2.Suite.__init__(self, pmf)\n\n    def Likelihood(self, data, hypo):\n        lam = hypo\n        k = data\n        like = thinkbayes2.EvalPoissonPmf(k, lam)\n\n        return like\n\ndef after_two_months(bulb):\n    \"\"\"\n    Predict the posterior distribution after two months\n    \"\"\"\n\n    mix = thinkbayes2.Pmf()\n\n    for lam1, prob1 in bulb.Items():\n        for lam2, prob2 in bulb.Items():\n            if lam1 + lam2 <= bulb.nb_bulb:\n                mix.Incr(lam1 + lam2, prob1 * prob2)\n\n    mix.Normalize()\n\n    return mix\n\n#First month posterior distribution\nsuite = Bulb('Month 1')\nsuite.Update(3)\n#Predict failures after two months\nprediction = after_two_months(suite)\nmax_lam, max_prob = 0, 0\nfor lam, prob in prediction.Items():\n    if prob > max_prob:\n        max_prob = prob\n        max_lam = lam\nprint(\"Maximum likelihood lambda={} with probability={}\".format(max_lam, max_prob))\nthinkplot.Pmf(prediction)\nthinkplot.Show()\n","sub_path":"code/exo74.py","file_name":"exo74.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"50455215","text":"num_lines = int(input())\n\nset_even = set()\nset_odd = set()\n\n\nfor i in range(num_lines):\n    sum_chars = sum(map(ord, input()))\n    div = sum_chars // (i + 1)\n\n    if div % 2 == 0:\n        set_even.add(div)\n    else:\n        set_odd.add(div)\n\nsum_even = sum(set_even)\nsum_odd = sum(set_odd)\n\n\nif sum_even == sum_odd:\n    list_output = list(map(str, set_even | set_odd))\n    print(', '.join(list_output))\nelif sum_odd > 
sum_even:\n list_output = list(map(str, set_odd - set_even))\n print(', '.join(list_output))\nelse:\n list_output = list(map(str, set_odd ^ set_even))\n print(', '.join(list_output))\n","sub_path":"SoftUni/Python Developmen/Python-Advanced/Tuples-Sets/Exercise/7_BattleOfNames.py","file_name":"7_BattleOfNames.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"498460966","text":"# =============================================================================\n# Ural Join CLI Action\n# =============================================================================\n#\n# Logic of the join CLI action enabling the user to join 2 csv files according\n# to the urls each of them contains.\n#\nimport csv\nimport sys\n\nfrom ural.cli.utils import custom_reader\nfrom ural.lru_trie import LRUTrie\n\n\ndef join_action(namespace):\n\n if namespace.large_cells:\n csv.field_size_limit(sys.maxsize)\n file1_headers, file1_position, file1_reader = custom_reader(\n namespace.file1, namespace.column1)\n file2_headers, file2_position, file2_reader = custom_reader(\n namespace.file2, namespace.column2)\n if namespace.select:\n headers = namespace.select + file2_headers\n else:\n headers = file1_headers + file2_headers\n writer = csv.writer(namespace.output)\n writer.writerow(headers)\n\n trie = LRUTrie()\n\n for line in file1_reader:\n url = line[file1_position]\n if namespace.select:\n try:\n metadata = [line[file1_headers.index(\n x)] for x in namespace.select]\n except ValueError as e:\n print(\"Woops, the header '\" + str(e)\n [1:-16] + \"' doesn't exist !\")\n sys.exit(1)\n else:\n metadata = line\n trie.set(url, metadata)\n\n for line in file2_reader:\n metadata = line\n trie_metadata = trie.match(line[file2_position])\n if trie_metadata:\n row = trie_metadata + metadata\n elif namespace.select:\n row = ['' for i in range(len(namespace.select))] + metadata\n else:\n row = ['' for i in range(len(file1_headers))] + metadata\n writer.writerow(row)\n","sub_path":"ural/cli/join.py","file_name":"join.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"470423064","text":"# this just a starter for anyone\n\nfrom tkinter import *\nfrom tkinter import messagebox as mbx\n\n\nclass basic_app:\n \n \"\"\" sets up environment \"\"\"\n \n def __init__(self, master):\n self.click = Button(master, text=\"click\", command = self.sayhello)\n self.click.grid(row=1, column=1)\n\n \n \"\"\"app functions\"\"\"\n \n def sayhello(self):\n mbx.showinfo(\"click registered\",\"hello\")\n\n\nroot = Tk()\nobj = basic_app(root)\nroot.mainloop()\n","sub_path":"simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"159879208","text":"import tensorflow.compat.v1 as tf\nimport tensorflow_hub as hub\nfrom tensorflow.compat.v1.keras import backend as K\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.keras import Model, Input\nfrom tensorflow.keras.layers import add\nfrom tensorflow.keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional, Lambda, Layer\nfrom tensorflow.keras.metrics import Precision, Recall\n\nfrom ElmoLayer import ElmoLayer\n\nimport os\n\nclass Dependency_Parser:\n def __init__(self, hidden_neurons=512, epochs=1, batch_size=32, verbose=1, max_len=50, n_tags=51, load_f=False, 
loadFile=\"tmp/model.h5\"):\n self.hidden_neurons = hidden_neurons\n self.epochs = epochs\n self.batch_size = batch_size\n self.verbose = verbose\n\n sess = tf.Session()\n K.set_session(sess)\n\n input_text = Input(shape=(max_len,), dtype=tf.string)\n # embed = ElmoLayer()\n # embedding = embed(input_text)\n embedding = ElmoLayer()(input_text)\n\n # Don't know why but it needs initialization after ElmoLayer\n sess.run(tf.global_variables_initializer())\n sess.run(tf.tables_initializer())\n\n # print_emb = tf.Print(embedding, [embedding])\n x = Bidirectional(LSTM(units=hidden_neurons, return_sequences=True,\n recurrent_dropout=0.2, dropout=0.2))(embedding)\n # x = Bidirectional(LSTM(units=hidden_neurons, return_sequences=True,\n # recurrent_dropout=0.2, dropout=0.2))(print_emb)\n x_rnn = Bidirectional(LSTM(units=hidden_neurons, return_sequences=True,\n recurrent_dropout=0.2, dropout=0.2))(x)\n x = add([x, x_rnn]) # residual connection to the first biLSTM\n out = TimeDistributed(Dense(n_tags, activation=\"softmax\"))(x)\n\n self.model = Model(input_text, out)\n self.model.compile(optimizer=\"adam\", loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n \n def fit(self, X_tr, y_tr, val):\n checkpoint_path = \"trained/cp.ckpt\"\n checkpoint_dir = os.path.dirname(checkpoint_path)\n cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n verbose=1)\n\n return self.model.fit(x=X_tr, y=y_tr, batch_size=self.batch_size, epochs=self.epochs, verbose=self.verbose, validation_data=val, callbacks=[cp_callback])\n \n def evaluate(self, X_test, y_test):\n return self.model.evaluate(x=X_test, y=y_test, batch_size=self.batch_size, verbose=self.verbose)\n\n def load(self, checkpoint_path):\n return self.model.load_weights(checkpoint_path)\n\n def predict(self, x):\n return self.model.predict(x)\n \n def save(self, checkpoint=\"trained/model.ckpt\"):\n return self.model.save_weights(checkpoint)\n","sub_path":"Dependency_Parser.py","file_name":"Dependency_Parser.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"291609366","text":"# to check if the given number is palindrome or not\n\nprint('Enter a number ')\nnum = int(input('NUMBER : '))\nrev = 0\ntemp = num\nwhile num > 0:\n b = num % 10\n rev = (rev * 10) + b\n num = num // 10\n\nif temp == rev:\n print('Palindrome')\nelse:\n print('Not a Palindrome')\n","sub_path":"basic python programmes/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"596188809","text":"# -*- coding: utf-8 -*-\n\n'''\nCopyright (c) 2013 Colin Curtain\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR 
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\nAuthor: Colin Curtain (ccbogel)\nhttps://github.com/ccbogel/PyQDA\n'''\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nimport os\n\ntry:\n    _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n    _fromUtf8 = lambda s: s\n\n\nclass Ui_Dialog_information(QtWidgets.QDialog):\n    \"\"\"\n    Dialog to display detailed information about PyQDA development, version and license.\n    \"\"\"\n\n    title = \"\"\n    informationText = \"\"\n    Dialog_information = None\n\n    def __init__(self, title, filename, parent = None):\n        \"\"\" Display information text in dialog \"\"\"\n\n        super(QtWidgets.QDialog, self).__init__(parent)  # use this to override the accept method\n        self.title = title\n        scriptdir = os.path.dirname(os.path.abspath(__file__))\n        htmlFile = os.path.join(scriptdir, filename)\n        try:\n            with open(htmlFile, 'r') as f:\n                self.informationText = f.read()\n        except:\n            self.informationText = \"Cannot open file.\"\n\n    def accepted(self):\n        \"\"\" Accepted button overridden method \"\"\"\n        # toPlainText() already returns a str in Python 3, so no utf-8 re-decoding is needed\n        self.information = self.textEdit.toPlainText()\n        self.Dialog_information.accept()\n\n    def setupUi(self, Dialog_information):\n        self.Dialog_information = Dialog_information\n        Dialog_information.setObjectName(_fromUtf8(\"Dialog_information\"))\n        Dialog_information.setWindowTitle(self.title)\n        Dialog_information.resize(800, 502)\n        self.gridLayout = QtWidgets.QGridLayout(Dialog_information)\n        self.gridLayout.setObjectName(_fromUtf8(\"gridLayout\"))\n\n        self.textEdit = QtWidgets.QTextBrowser(Dialog_information)\n        self.textEdit.setOpenExternalLinks(True)\n        self.textEdit.setObjectName(_fromUtf8(\"textEdit\"))\n        self.textEdit.setHtml(self.informationText)\n        self.textEdit.setReadOnly(True)\n        self.gridLayout.addWidget(self.textEdit, 0, 0)\n\n        '''self.retranslateUi(Dialog_information)\n\n    def retranslateUi(self, Dialog_information):\n        Dialog_information.setWindowTitle(QtWidgets.QApplication.translate(\"Dialog_information\", self.title, None, QtWidgets.QApplication.UnicodeUTF8))\n'''\n\nif __name__ == \"__main__\":\n    import sys\n    app = QtWidgets.QApplication(sys.argv)\n    Dialog_information = QtWidgets.QDialog()\n    ui = Ui_Dialog_information(\"title\", \"filename\")\n    ui.setupUi(Dialog_information)\n    Dialog_information.show()\n    sys.exit(app.exec_())\n","sub_path":"Py3QDA-0.1/Py3QDA/Information.py","file_name":"Information.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"31423863","text":"import requests\nfrom config import *\nfrom log import getLogger\nfrom util.http import send_http\nfrom settings import API_SHOP_INFO, SHOP_INFO_HEADERS\nfrom pkg.token_m import TokenM\n\nlogger = getLogger(__name__)\n\n\nclass ShopInfo:\n    def __init__(self, shopId):\n        self.shopId = shopId\n        self.session = requests.session()\n        self.url = API_SHOP_INFO.format(shopId)\n        self.token = TokenM(shopId)\n        self.homepage = \"\"\n        self._fetched = False\n\n    def start_request(self):\n        headers = SHOP_INFO_HEADERS\n        headers['Referer'] = \"http://www.dianping.com/shop/{}\".format(self.shopId)\n        result = send_http(self.session,\n                           'get',\n                           self.url,\n                           retries=MAX_RETRY,\n                           headers=headers,\n                           timeout=TIMEOUT,\n                           
_token=self.token.new(),\n                           kind='SHOP',\n                           )\n        if result:\n            response, _, _ = result\n            self.homepage = response.json()\n            self._fetched = True\n            logger.info(f'Successfully fetched details for shop {self.shopId}.')\n\n\nif __name__ == '__main__':\n    s = ShopInfo(93600792)\n    s.start_request()\n    print(s.homepage)\n","sub_path":"pkg/shop_info.py","file_name":"shop_info.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"61289721","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# It is recommended to add the jupyter-navbar extension. \n# > git clone https://github.com/shoval/jupyter-navbar.git \n# > cd jupyter-navbar \n# > python setup.py\n\n# # Notes\n# * The dates in the dataset correspond to when the data was obtained.\n# * The dates in the examples correspond to the prediction Y\n# \n# \n# * Jorquera apparently uses an hourly concentration above 82 ppb as the Critical Event (see p. 3 of FORECASTING OZONE DAILY MAXIMUM LEVELS AT SANTIAGO, CHILE); in our case, a THETA > 82.\n# * O3 percentile 0.75 --> 92.0\n# * O3 percentile 0.90 --> 111.9\n# \n# \n# * Winter: May 01 to August 31. 05-01 - 08-31\n# * Summer: November 01 to March 31. 11-01 - 03-31\n# \n# * Las Condes:\n#     * v2004 - v2013 (2014)\n#     * mean 76.176741\n#     * std 20.619374\n#     * std/mean 0.2706780800717111\n#     * q0.5 75.557200 -> 76\n#     * q0.75 89.000000 -> 89\n# \n# * Independencia:\n#     * v2009 - v2017 (2018)\n#     * mean 45.802717\n#     * std 15.391089\n#     * std/mean 0.3360300438072265\n#     * q0.5 44.195900 -> 46\n#     * q0.75 54.333300 -> 56\n# \n# * Parque O'Higgins\n#     * v2009 - v2017 (2018)\n#     * mean 50.853193\n#     * std 14.191407\n#     * std/mean 0.2790661935426552\n#     * q0.5 51.042400 -> 50\n#     * q0.75 60.256450 -> 60\n\n# In[ ]:\n\n\n15.391089/45.802717\n\n\n# # Imports\n\n# In[ ]:\n\n\nimport numpy as np\nfrom tensorflow import set_random_seed\nnp.random.seed(123)\nset_random_seed(2)\n\nimport os\nimport sys\nimport math\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport plotly.plotly as py\nfrom plotly.offline import init_notebook_mode, enable_mpl_offline, iplot_mpl, iplot\nimport cufflinks as cf\nfrom datetime import datetime\n\nfrom ipywidgets import widgets, interactive, interact\nfrom IPython.display import Javascript, display\n\nfrom sklearn import preprocessing\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error, accuracy_score\n\nfrom keras.models import Sequential\nfrom keras.layers import Input, Dense, LSTM, Dropout, TimeDistributed\nfrom keras.models import load_model\nfrom keras.callbacks import EarlyStopping\nimport keras.backend as K\n\nfrom hashlib import md5\n\n#import utils\n#from utils import gpr_invierno, gpr_verano\n\n\n\n#init_notebook_mode(connected=True)\n#cf.go_offline(connected=True)\n#enable_mpl_offline()\nGRAPH_IS_SET = False\n\nROOT = \"./\"\nMODELS_FOLDER = \"models\"\n\ntry:\n    os.mkdir(MODELS_FOLDER)\nexcept:\n    pass\n\n\n# # General Functions\n\n# ## get_station\n\n# In[ ]:\n\n\ndef get_station(STATION):\n    if STATION == \"Las_Condes\":\n        FILTER_YEARS = [2004, 2013]\n        DEFAULT_THETA = 89\n    elif STATION == \"Independencia\" or STATION == \"Independencia2019\":\n        FILTER_YEARS = [2010, 2017]\n        DEFAULT_THETA = 54\n    elif STATION == \"Parque_OHiggins\" or STATION == \"POH_full\":\n        FILTER_YEARS = [2009, 2016]\n        DEFAULT_THETA = 60\n    \n    elif STATION == \"CONDES 4F\":\n        FILTER_YEARS = [2008, 2013]\n        DEFAULT_THETA = 84\n    elif STATION == \"INDEP 4F\":\n        FILTER_YEARS = [2009, 2014]\n        DEFAULT_THETA = 58\n    
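# NOTE (assumed interpretation, based on the Notes cell above): each DEFAULT_THETA\n    # is roughly that station's rounded 0.75 quantile of daily O3, so "critical"\n    # days are approximately the top quarter of the station's distribution.\n    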
elif STATION == \"POH 4F\":\n FILTER_YEARS = [2009, 2014]\n DEFAULT_THETA = 60\n \n return STATION, FILTER_YEARS, DEFAULT_THETA\n \n\n\n# ## for_graph\n\n# In[ ]:\n\n\ndef for_graph():\n if \"-f\" not in sys.argv:\n return False\n global GRAPH_IS_SET\n if GRAPH_IS_SET == False:\n init_notebook_mode(connected=True)\n cf.go_offline(connected=True)\n enable_mpl_offline()\n GRAPH_IS_SET = True\n return True\n\n\n# ## import_dataset\n\n# In[ ]:\n\n\ndef import_dataset(STATION):\n if STATION == \"Las_Condes\" or STATION == \"CONDES 4F\":\n CSV = \"data/dump-Las_Condes_2018-04-12_230000-verano.csv\"\n \n elif STATION == \"Independencia\":\n CSV = \"data/dump-Independencia_2018-04-12_230000-verano.csv\"\n elif STATION == \"Independencia2019\":\n CSV = \"data/dump-Independencia_2019-06-14_230000-verano.csv\"\n \n elif STATION == \"Parque_OHiggins\":\n CSV = \"data/dump-Parque_OHiggins_2019-06-12_230000-verano.csv\"\n elif STATION == \"POH_full\":\n CSV = \"data/dump-Parque_OHiggins_2019-06-14_230000-verano.csv\"\n \n \n elif STATION == \"INDEP 4F\":\n CSV = \"data/dump-Independencia_2019-06-14_230000-verano.csv\"\n elif STATION == \"POH 4F\":\n CSV = \"data/dump-Parque_OHiggins_2019-06-14_230000-verano.csv\"\n \n DS = pd.read_csv(ROOT+CSV)\n #DS = pd.read_csv(ROOT+\"data/dump-Independencia_2018-04-12_230000-verano.csv\")\n DS[\"registered_on\"] = pd.to_datetime(DS[\"registered_on\"])\n DS.set_index(\"registered_on\",inplace=True)\n return DS\n\n\n# ## HourMax\n# Para obtener la hora a la que ocure el máximo de un atributo en cada día.\n\n# In[ ]:\n\n\ndef HourMax(feature,DF):\n maxf = \"hm\"+feature\n \n fecha = DF.index.hour\n fecha.tolist()\n DF[maxf] = fecha.tolist()\n \n g1 = DF.groupby(pd.Grouper(freq=\"D\"))\n \n hmaxdiaria = []\n \n for fecha, group in g1:\n group = group[[feature,maxf]].dropna()\n if not group.empty:\n ar = group.values.tolist()\n m = max(ar)\n hourmax = m[1]\n hmaxdiaria.append(hourmax)\n else:\n hmaxdiaria.append(np.nan)\n\n return np.array(hmaxdiaria)\n\n\n# ## precalcular_agregados\n# Precalcula los agregados de los datos que se tienen. \n# Esto tarda por lo que el resultado es guardado en un archivo. \n# Se ejecuta la función inmediatamente para asi tener la variable PRECALC disponible como parametro por defecto para cuando se necesite. \n\n# In[ ]:\n\n\ndef precalcular_agregados(STATION,REMAKE=False):\n try:\n if REMAKE == False:\n agdf = pd.read_csv(ROOT+\"precalcs/precalc_agregados_%s.csv\"%STATION)\n agdf = agdf[ agdf.columns[1:] ]\n else:\n pd.read_csv(\"nunca_nadie_me_encontrara.nunca_jamas\")\n except:\n df = import_dataset(STATION)\n agdf = pd.DataFrame()\n \n agregados = []\n \n # Calculo de la hora maxima de atributos en un día\n for feat in df.columns:\n maxf = \"hm\"+feat\n print(\" For: \",feat)\n hmaxdiaria = HourMax(feat, df)\n print(\" len(%s), \"%maxf, len(hmaxdiaria))\n #agregados.append( (maxf, hmaxdiaria) )\n agdf[maxf] = hmaxdiaria\n \n agdf.to_csv(ROOT+\"precalcs/precalc_agregados_%s.csv\"%STATION)\n \n \n return agdf\n\nPRECALC = precalcular_agregados(\"Las_Condes\")\n\n\n# ## precalcular_eventos\n# Precalcula los eventos criticos del ozono. 
\n# Un evento crítico es cuando el promedio del ozono en una ventana de 8 horas supera un valor (theta) determinado.\n# Entrega la cantidad de eventos criticos en un día y en otra columna si ocurrió un evento o no.\n\n# In[ ]:\n\n\ndef precalcular_eventos(theta,STATION,REMAKE=False):\n print(\"Precalculandos Eventos Criticos ...\")\n try:\n if REMAKE == False:\n ecdf = pd.read_csv(ROOT+\"precalcs/precalc_ECat%.2f_%s.csv\"%(theta, STATION), index_col=\"registered_on\", parse_dates=True)\n ecdf = ecdf.asfreq(\"D\")\n else:\n pd.read_csv(\"nunca_nadie_me_encontrara.nunca_jamas\")\n except:\n df = import_dataset(STATION)\n \n g1 = df.groupby(pd.Grouper(freq=\"D\"))\n \n eventos = []\n dates = []\n for fecha, group in g1:\n #print(\"========\")\n #print(group[\"O3\"])\n group = group[[\"O3\"]].dropna().asfreq(\"h\")\n count = 0\n #print(\"--\")\n \n if len(group) >= 8:\n ###print(group)\n for fecha2 in group.index[:-7]:\n i,f = fecha2, fecha2 + np.timedelta64(7,\"h\")\n gmean = group[i:f].mean()[0]\n if gmean >= theta:\n count += 1\n if count > 0:\n ec = 1\n else:\n ec = 0\n ###print(fecha, count, ec)\n eventos.append( [count, ec] )\n else:\n ###print(fecha, np.nan)\n eventos.append( [np.nan, np.nan])\n dates.append(fecha)\n eventos = np.array(eventos)\n ecdf = pd.DataFrame(eventos, columns=[\"countEC\",\"EC\"])\n ecdf[\"registered_on\"] = np.array(dates)\n ecdf = ecdf.set_index(\"registered_on\")\n #print(ecdf[\"EC\"])\n \n ecdf.to_csv(ROOT+\"precalcs/precalc_ECat%.2f_%s.csv\"%(theta, STATION), index=True)\n\n return ecdf\n\nasdf = precalcular_eventos(61,\"Independencia\",REMAKE=False)\n\n\n# ## import_merge_and_scale\n# Importa el dataset. \n# Agrega los datos AGREGADOS. Ej. la hora a la que ocurre el máximo de un atributo. \n# Y escala los datos.\n\n# In[ ]:\n\n\ndef import_merge_and_scale(Config, verbose=True, SCALE = True):\n STATION = Config[\"STATION\"]\n AGREGADOS = Config[\"AGREGADOS\"]\n TARGET = Config[\"TARGET\"]\n THETA = Config[\"THETA\"]\n PRECALC = Config[\"PRECALC\"]\n SHIFT = Config[\"SHIFT\"]\n PAST = Config[\"PAST\"]\n SCALER = Config[\"SCALER\"]\n FILTER_YEARS = Config[\"FILTER_YEARS\"]\n IMPUTATION = Config[\"IMPUTATION\"]\n if \"GRAPH\" in Config:\n GRAPH = Config[\"GRAPH\"]\n if GRAPH == True:\n for_graph()\n #PREDICTED_CE = Config[\"PREDICTED_CE\"]\n \n \n #SCALE = True\n \n vprint = print if verbose else lambda *a, **k: None\n \n \n print(\"Importing Dataset and Scaling...\")\n DS = import_dataset(STATION)\n vprint(\" Dataset imported.\")\n \n vprint(\" Adjuntando Agregados Precalculados.\")\n agregados = []\n\n if AGREGADOS == [\"ALL\"]:\n AGREGADOS = DS.columns\n \n for feat in AGREGADOS:\n maxf = \"hm\"+feat\n vprint(\" Adding \",maxf)\n hmaxdiaria = PRECALC[maxf]\n agregados.append( (maxf, hmaxdiaria) )\n \n \n \n # Agrupar por maximos diarios\n gp = DS.groupby(pd.Grouper(freq='D'))\n df = gp.aggregate(np.max)\n #df = DS#.asfreq(\"H\")\n \n #TARGET_MASK = np.isnan(df[TARGET]) != True\n \n if IMPUTATION:\n if IMPUTATION != \"mean\":\n df = df.fillna(method=IMPUTATION)\n else:\n df = df.fillna(df.mean())\n \n for name, data in agregados:\n df[name] = data.values\n \n # Agregando eventos criticos\n ecdf = precalcular_eventos(THETA, STATION, REMAKE=False)\n df = pd.concat([df, ecdf],axis=1)\n \n \n \n # Agregar si O3 > THETA\n df[\"O3btTHETA\"] = (df[\"O3\"].dropna() >= THETA)*1.\n \n #print(df)\n \n \n #Scaler - normalize\n if SCALE:\n #min_max_scaler = preprocessing.MinMaxScaler()\n #miScaler = preprocessing.StandardScaler()\n miScaler = SCALER()\n norm_data = 
pd.DataFrame(miScaler.fit_transform(df), columns=df.columns, index=df.index)\n #norm_data[\"EC\"] = ecdf[\"EC\"].values\n vprint(\" Dataset Scaled.\")\n else:\n #ONLY FOR TEST\n norm_data = df\n for i in range(0,5):\n print(\"\")\n print(\" ===========================================================\")\n print(\" ==================DATASET SIN ESCALAR - OJO================\")\n print(\" ===========================================================\")\n \n #Scaler para el Y\n #Yscaler = preprocessing.MinMaxScaler()\n #Yscaler = preprocessing.StandardScaler()\n Yscaler = SCALER()\n Yscaler.fit(df[TARGET].values.reshape((-1,1)))\n \n #print(norm_data[\"y\"])\n if TARGET in [\"EC\",\"O3btTHETA\"]: #Feature booleana\n norm_data[\"y\"] = df[TARGET].values.reshape((-1,1))\n else:\n norm_data[\"y\"] = Yscaler.transform( df[TARGET].values.reshape((-1,1)) )\n #print(df)\n\n #Scaler para datos horarios\n #h24scaler = preprocessing.MinMaxScaler()\n #h24scaler = preprocessing.StandardScaler()\n h24scaler = SCALER()\n ###h24scaler.fit(np.array(range(0,24),dtype=\"float64\")[:, None])\n \n # Adding agregados escalados\n ###for name, data in agregados:\n ### norm_data[name] = h24scaler.transform(data[:,None])\n ###vprint(\" Agregados escalados.\")\n \n # Adding Target\n YLABELS = []\n if SHIFT >= 0:\n # Predecir el TARGET actual o en el pasado.\n #norm_data[\"y\"] = norm_data[\"TARGET\"].shift(SHIFT)\n #TARGET_TO_SHIFT = TARGET_TO_SHIFT\n norm_data[\"y\"] = norm_data[\"y\"].shift(SHIFT)\n YLABELS.append(\"y\")\n norm_data = norm_data.drop(TARGET, axis=1)\n print(\" SHIFT==0 COLUMN %s removed from features\"%TARGET)\n vprint(\" y Target added.\")\n #elif SHIFT == -1:\n # # PREDECIR EL SIGUIENTE DÍA\n # norm_data[\"y\"] = norm_data[TARGET].shift(SHIFT)\n # YLABELS.append(\"y\")\n # vprint(\" y Target added.\")\n else: #SHIFT < -1\n # PREDECIR VARIOS DIAS HACIA ADELANTE\n #norm_data[\"y\"] = norm_data[TARGET].shift(-1)\n norm_data[\"y\"] = norm_data[\"y\"].shift(-1)\n YLABELS.append(\"y\")\n vprint(\" y Target added.\")\n M = SHIFT * -1\n if PAST is False:\n c = 1\n else:\n c = PAST*-1\n while c <= M:\n if c == 1:\n c += 1\n continue\n #norm_data[\"y\"+str(c)] = norm_data[TARGET].shift(-1*c)\n #TARGET_MASK = TARGET_MASK.shift(-1)\n norm_data[\"y\"+str(c)] = norm_data[\"y\"].shift(-1*c)\n #norm_data[\"y\"] = norm_data[\"y\"].where(TARGET_MASK)\n YLABELS.append(\"y\"+str(c))\n vprint(\" y%d Target added.\"%(c))\n c += 1\n #print(norm_data[\"y\"].copy())\n #return\n \n #norm_data = norm_data.fillna(method=\"ffill\")\n \n if FILTER_YEARS:\n i,f = FILTER_YEARS\n norm_data = norm_data[\"%d-11-01\"%i:\"%d-03-31\"%(f+1)]\n #print(norm_data)\n dataset = norm_data\n \n dataset = dataset[ (dataset.index.month>=11) | (dataset.index.month<=3) ]\n \n #dataset[\"O3\"] = dataset[\"O3\"][\"2004-11-01\":\"2004-11-05\"]\n \n \n #print(dataset)\n \n return dataset, YLABELS, Yscaler, h24scaler\n\n\n# In[ ]:\n\n\n# Alias for test\n#STATION, FILTER_YEARS, THETA = \"POH_full\", [2010, 2017], 56\n#STATION, FILTER_YEARS, THETA = \"Las_Condes\", [2004, 2013], 89\n#STATION, FILTER_YEARS, THETA = \"Parque_OHiggins\", [2009, 2017], 60\nSTATION, FILTER_YEARS, THETA = get_station(\"INDEP 4F\")\ntempConfig = {\n \"STATION\": STATION,\n \"SCALER\" : preprocessing.StandardScaler,\n \"AGREGADOS\": [],#[\"ALL\"],\n \"PRECALC\": [], #precalcular_agregados(STATION),\n \"IMPUTATION\": None,\n \"THETA\": THETA,\n \"TARGET\":\"O3\",\n \"SHIFT\":-1,\n \"PAST\":False,\n \"FILTER_YEARS\": FILTER_YEARS,\n}\nnorm_data, YLABELS, dataScaler, __scaler = 
import_merge_and_scale(tempConfig, SCALE = False)\noriginal = norm_data\n\n\n# In[ ]:\n\n\nfrom utils import gpr_invierno, gpr_verano\ngp = ((original[\"O3\"] )*1).groupby(gpr_verano)\nprint(gp.aggregate(np.mean))\n\ngp = ((original[\"O3\"] > 82 )*1).groupby(gpr_verano)\nprint(gp.aggregate(np.sum))\n\n\n# In[ ]:\n\n\nprint(len(original[\"O3\"]))\noriginal[\"O3\"].dropna()\nmask = np.isnan(original[\"O3\"])\noriginal[\"O3\"][mask]\noriginal[\"O3\"].quantile(0.75)\n\n\n# In[ ]:\n\n\n#STOP\n\n\n# In[ ]:\n\n\na = pd.DataFrame([1,2,3, float(\"nan\")])\nn = preprocessing.StandardScaler()\nn.fit(a)\nn.transform(a)\nm = a.where( np.isnan(a) != True)\nm\n\nb = pd.DataFrame([1,2,3,4])\nb.where( np.isnan(a) != True )\n\n\n# ## select_features\n# Crea un DataFrame que contiene sólo los atributos indicados en FEATURES o los datos que contengan un porcentaje de no nulos mayor al CUT. Cuando se utiliza el CUT, se pueden banear atributos manualmente. \n# El DataFrame devuelto contiene todas filas en donde ningún dato es no nulo.\n#Nota: CUT DIAS_DISPONIBLES PREDICTORES\n# 0.40, 3004 ['CO' 'PM10' 'O3']\n# 0.30, 2046 ['CO' 'PM10' 'PM25' 'NO' 'NOX' 'UVA' 'UVB' 'O3']\n# 0.28, 1712 ['CO' 'PM10' 'PM25' 'NO' 'NOX' 'RH' 'TEMP' 'UVA' 'UVB' 'O3']\n# 0.25, 1629 ['CO' 'PM10' 'PM25' 'NO' 'NOX' 'WD' 'RH' 'TEMP' 'WS' 'UVA' 'UVB' 'O3']\n# In[ ]:\n\n\n# Selección de atributos en base a la cantidad de ejemplos sin datos nulos que se dispondrán\ndef select_features(internalConfig, Config, verbose=True):\n dataset = internalConfig['complete_dataset']\n ylabels = internalConfig['ylabels']\n FEATURES = Config['FIXED_FEATURES']\n CUT = Config['CUT']\n BAN = Config['BAN']\n SHIFT = Config['SHIFT']\n \n vprint = print if verbose else lambda *a, **k: None\n \n print(\"Selecting features ...\")\n if FEATURES == []:\n vprint(\" Using CUT=%.2f\"%CUT)\n \n a=dataset.isna().sum()\n b=dataset.describe().iloc[0]\n cantidad = (b/(a+b))\n \n atributos = cantidad[cantidad >= CUT]\n excluidos = cantidad[cantidad < CUT]\n\n index = atributos.index.values.tolist()\n banned = []\n for b in BAN:\n if b in index:\n index.remove(b)\n banned.append(b)\n if \"hm\"+b in index:\n index.remove(\"hm\"+b)\n banned.append(\"hm\"+b)\n \n vprint(\" %i Atributos Excluidos:\"%(len(excluidos)), excluidos.index.values)\n vprint(\" %i Atributos Baneados:\"%(len(banned)),banned)\n else:\n vprint(\" Using fixed features ...\")\n if \"y\" in FEATURES:\n print(\" WARNING: 'y' no debe estar en los ATRIBUTOS, se removerá y agregara al final del dataset\")\n FEATURES.remove(\"y\")\n \n \n index = FEATURES + ylabels\n\n\n \n\n data = dataset[index]\n \n #print(data.dropna(how=\"all\"))\n #temp = data.dropna(how=\"all\")\n #temp = temp.drop(\"y\",axis=1)\n #temp.to_csv(\"data_con_nan.csv\",na_rep=\"NaN\")\n \n \n #Removiendo ejemplos con al menos un atributo NaN\n data.dropna(inplace=True)\n \n FEATURES = index\n for y in ylabels:\n FEATURES.remove(y)\n vprint(\" %i Atributos Seleccionadios:\"%(len(FEATURES)),FEATURES)\n vprint(\" Cantidad de dias totales disponibles:\",len(data))\n\n \n \n return data, FEATURES\n\n\n# In[ ]:\n\n\n#STATION, FILTER_YEARS, THETA = \"Independencia\", [2009, 2017], 56\n#STATION, FILTER_YEARS, THETA = \"Las_Condes\", [2004, 2013], 92\nSTATION, FILTER_YEARS, THETA = \"Parque_OHiggins\", [2009, 2017], 60\ntempConfig = {\n \"STATION\":STATION,\n \"SCALER\" : preprocessing.StandardScaler,\n \"IMPUTATION\" : None,# \"ffill\",\n \"AGREGADOS\":[],\n \"PRECALC\":precalcular_agregados(STATION),\n \"THETA\":THETA,\n \"TARGET\":\"O3\",\n \"SHIFT\":-1,\n 
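# SHIFT=-1 labels each example with the next day's TARGET (myLSTM and\n    # ClassifierLSTM set SHIFT = FUTURE*-1); more negative values make\n    # import_merge_and_scale add y2..yM columns for multi-day-ahead labels.\n    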
\"PAST\":False,\n \"FIXED_FEATURES\":['CO', 'PM10', 'PM25', 'NO', 'NOX', 'WD', 'RH', 'TEMP', 'WS', 'UVA', 'UVB', 'O3'],\n \"CUT\": 0.41, #0.26 para 12 atributos con todos los años\n \"BAN\": [\"countEC\",\"EC\", \"O3btTHETA\"],\n \"FILTER_YEARS\" : FILTER_YEARS\n }\ntempIC={}\ntempIC[\"complete_dataset\"], tempIC[\"ylabels\"], __Yscaler, __h24scaler = import_merge_and_scale(tempConfig, verbose=False, SCALE=True)\ndata, __ = select_features(tempIC, tempConfig, verbose=True)\n\n\n# In[ ]:\n\n\ndata\n\n\n# In[ ]:\n\n\n#kk = pd.read_csv(\"data_con_nan-CUT=0.26.csv\", index_col=0, parse_dates=True)\n\n\n# In[ ]:\n\n\n#kk = data.drop(\"y\",axis=1).dropna(how=\"all\")\n#pd.read_csv(\"data_horaria_con_nan_CUT=0.55.csv\", index_col=0, parse_dates=True)\n\n\n# ## nonancheck\n# Función de control para asegurarse que no existan datos no nulos. \n# La funcion es estatica por lo que los valores deben cambiarse dentro de la función si es que se desea probar algo más.\n\n# In[ ]:\n\n\n#Asegurandose que no hayan datos faltantes\ndef nonancheck():\n STATION = \"Las_Condes\"\n tempConfig = {\n \"STATION\" : STATION,\n \"SCALER\" : preprocessing.StandardScaler,\n \"IMPUTATION\":None,\n \"FILTER_YEARS\" : [],\n \"AGREGADOS\":[], \"PRECALC\":precalcular_agregados(STATION),\n \"THETA\":61,\n \"TARGET\":\"O3\",\n \"SHIFT\":-1,\n \"PAST\":False,\n \"FIXED_FEATURES\":[],\n \"CUT\": 0.28,\n \"BAN\": [],\n \n }\n tempIC={}\n tempIC[\"complete_dataset\"], tempIC[\"ylabels\"], __Yscaler, __h24scaler = import_merge_and_scale(tempConfig, verbose=False)\n data, __ = select_features(tempIC, tempConfig, verbose=False)\n return data.isna().describe()\n\nnonancheck()\n\n\n# In[ ]:\n\n\n#PRUEBAS - CONTROL DE DATOS\ndef y_vs_target():\n STATION = \"Las_Condes\"\n tempConfig = {\n \"STATION\" : STATION,\n \"SCALER\" : preprocessing.StandardScaler,\n \"IMPUTATION\":None,\n \"FILTER_YEARS\" : [],\n \"AGREGADOS\":[], \"PRECALC\":precalcular_agregados(STATION),\n \"THETA\":61,\n \"TARGET\":\"O3\",\n \"SHIFT\":-1,\n \"PAST\":False,\n \"FIXED_FEATURES\":[],\n \"CUT\": 0.28,\n \"BAN\": [],\n \"GRAPH\":True,\n }\n tempIC={}\n tempIC[\"complete_dataset\"], tempIC[\"ylabels\"], __Yscaler, __h24scaler = import_merge_and_scale(tempConfig, verbose=False)\n data, __ = select_features(tempIC, tempConfig, verbose=False)\n \n dataset = tempIC[\"complete_dataset\"]\n TARGET = tempConfig[\"TARGET\"]\n\n oo = data[TARGET]\n yy = dataset[\"y\"]\n\n gg = pd.concat([yy,oo], axis=1)\n\n gg.iplot()\n\nif for_graph():\n y_vs_target()\n\n\n# ## obtener_secuencias\n# Obtiene las secuencias de distintos largos máximos posibles. \n# Devuelve un diccionario donde la llave es el largo de las secuencia y el valor es una lista con tuplas con la fecha inicial y final correspondiente a dicha secuencia de ese largo. \n# Ej. 
{ 1 : [ ('2000-01-01', '2000-01-01') ], 3 : [ ('2006-01-01', '2006-01-03'), ('2006-02-03', '2006-02-05') ]}\n\n# In[ ]:\n\n\n# Get the number of sequences of each maximum length that can be formed from the consecutive days.\ndef obtener_secuencias(internalConfig):\n    data = internalConfig[\"data\"]\n    \n    secuencias = {}\n    fecha = data.index[0] \n    fin = data.index[-1] + np.timedelta64(1,\"D\")\n    largo = 0\n    i = 0\n    sec_i = fecha\n    while True:\n        i += 1\n        siguiente = fecha + np.timedelta64(1,\"D\")\n        if siguiente in data.index:\n            fecha = siguiente\n        else:\n            if i not in secuencias:\n                secuencias[i] = []\n            secuencias[i].append( (sec_i,fecha) )\n            i = 0\n            f_index = data.index.get_loc(fecha) + 1\n            if f_index != len(data.index):\n                fecha = data.index[f_index]\n                sec_i = fecha\n            else:\n                break\n    return secuencias\n    \n#SECUENCIAS = secuencias\n\n\n# ## make_examples\n# Receives the dataset, the sequences and the number of TIMESTEPs, and builds the examples with their corresponding labels.\n# \n# For example, given a TIMESTEP of 3: \n# each example will contain 3 successive days and a label Y associated with the next day's ozone. \n# That is, the data at times T, T-1 and T-2 are used to predict the ozone at time T+1: \n# (T-2),(T-1),(T) -> (T+1) \n# \n# From the dataset's point of view, this corresponds to 3 successive rows in chronological order, where the label is the Y value of the last row.\n# \n# The function returns: \n# > An array 'examples' with the training examples, with shape (count, TIMESTEP, FEATURES). \n# > An array 'y' with the labels in a two-dimensional, single-column array with shape (count, 1). \n# > An array 'date_index' with the dates associated with the PREDICTION Y. \n# \n# The three arrays are ordered in time, so date_index[i] corresponds to the label y[i] of the example examples[i]\n\n# In[ ]:\n\n\n# Build the different examples according to the given TIMESTEP.\n# NOTE: the date attached to each example and to the 'y' list is the date of the prediction 'y',\n#       not the date of the input data; this allows ordered plotting later.\n#       That is, to predict the ozone on a day X, the data of the previous TIMESTEP days is used.\ndef make_examples(internalConfig, Config, verbose=True):\n    data = internalConfig[\"data\"]\n    secuencias = internalConfig[\"secuencias\"]\n    ylabels = internalConfig[\"ylabels\"]\n    TIMESTEP = Config[\"TIMESTEP\"]\n    OVERLAP = Config[\"OVERLAP\"]\n    \n    \n    vprint = print if verbose else lambda *a, **k: None\n    \n    print(\"Making examples ...\")\n    #TIMESTEP= 3\n    #OVERLAP = True\n    \n    largos = list(secuencias.keys())\n    largos.sort()\n    examples = []\n    y = []\n    ylen = len(ylabels)\n    \n    for l in largos:\n        #print(l)\n        if l < TIMESTEP:\n            continue\n        for sec in secuencias[l]:\n            inicio, fin = sec\n            #print(sec)\n            s = data[inicio:fin].values\n            if OVERLAP:\n                i = 0\n                while i <= len(s) - TIMESTEP:\n                    c = 0\n                    new_example = []\n                    yy = []\n                    while c < TIMESTEP:\n                        new_example.append( s[i+c][:-ylen] )\n                        #TimeDistributed\n                        yy.append( s[i+c][-ylen:] )\n                        c+=1\n                    fecha = inicio + np.timedelta64(i+c, \"D\")\n                    
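# e.g. with TIMESTEP=3 and i=0: rows 0..2 feed the example and, since\n                    # c == TIMESTEP at this point, 'fecha' = inicio + 3 days is the date\n                    # of the predicted y, matching the (T-2),(T-1),(T) -> (T+1) scheme\n                    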
##new_example.reverse()\n examples.append( [fecha] + new_example )\n y.append( ( fecha, s[i-1][-1] ) )\n #y.append(( [fecha] + yy ))\n #break\n #break\n\n \n print(\" Ejemplos Disponibles: , \",len(examples))\n vprint(\" len(y), \",len(y))\n\n \n # Sort by Date of prediction Y\n examples.sort()\n y.sort()\n \n \n examples = np.array(examples)\n y = np.array(y)\n \n # Get date index of all examples in order. To use it later\n date_index = y[:,0]\n vprint(\" len(date_index), \", len(date_index))\n \n \n examples = np.array( examples[:,1:].tolist() )\n y = np.array( y[:,1:].tolist() )\n #y = y[:,1:]\n \n vprint(\" examples.shape :\", examples.shape )\n vprint(\" y.shape :\", y.shape )\n return examples, y, date_index\n\n\n# ## join_index\n# Función auxiliar que se utiliza para indexar un columna de fechas a una columna de datos, y así poder asociarlas temporalmente junto con otras columnas o datasets.\n\n# In[ ]:\n\n\n# Une la fecha a un arreglo de valores, ambos de igual largo\n# Retorna un DataFrame\ndef join_index(date_index, array, label):\n date_index = date_index.reshape( (-1,1) )\n array = array.reshape( (-1,1) )\n df = pd.DataFrame(np.hstack([date_index, array]))\n df.columns = [\"fecha\",label]\n df = df.set_index(\"fecha\")\n return df\n\n\n# ## plot_y_true\n# Función estática y de control. \n# Para todos los ejemplos, grafica el valor de Y y la característica que representa en los distintos TIMESTEP. \n# Para el ejemplo, grafica el Ozono como TARGET con un TIMESTEP de 3\n\n# In[ ]:\n\n\n# Plot del TARGET y su valor actual\ndef plot_y_true():\n #STATION = \"Las_Condes\"\n STATION, FILTER_YEARS, THETA = get_station(\"INDEP 4F\")\n tempConfig = {\n \"STATION\":STATION,\n \"SCALER\" : preprocessing.StandardScaler,\n \"IMPUTATION\":None,\n \"FILTER_YEARS\" : FILTER_YEARS,\n \"AGREGADOS\":[],\n \"PRECALC\":precalcular_agregados(STATION),\n \"THETA\":THETA,\n \"TARGET\":\"O3\",\n \"TIMESTEP\":5,\n \"OVERLAP\":True,\n \"SHIFT\":-1,\n \"PAST\":False,\n \"FIXED_FEATURES\":['CO', 'PM10', 'PM25', 'NO', 'NOX', 'WD', 'RH', 'TEMP', 'WS', 'UVA', 'UVB', 'O3'],\n \"CUT\": 0.41,\n \"BAN\": [\"EC\",\"countEC\", 'O3btTHETA'],\n \"GRAPH\": True\n }\n tempIC={}\n tempIC[\"complete_dataset\"], tempIC[\"ylabels\"], __Yscaler, __h24scaler = import_merge_and_scale(tempConfig, verbose=False, SCALE=False)\n tempIC[\"data\"], tempIC[\"features\"] = select_features(tempIC, tempConfig, verbose=True)\n tempIC[\"secuencias\"] = obtener_secuencias(tempIC)\n examples, y, date_index = make_examples(tempIC, tempConfig, verbose=False)\n \n FEATURES = tempIC[\"features\"]\n TARGET = tempConfig[\"TARGET\"]\n TIMESTEP = tempConfig[\"TIMESTEP\"]\n \n f = FEATURES.index(TARGET)\n \n print(y.shape)\n\n df = join_index(date_index, y[:,-1,0], \"y (T+1)\")\n ### El indice esta mostrando el ozono, es un array, no un dataframe\n \n for i in range(0, TIMESTEP):\n label = \"T\" if i==0 else \"T-%d\"%i\n gf = join_index(date_index, examples[:,TIMESTEP-1-i,f], label)\n df = pd.concat([df,gf], axis=1)\n df.asfreq(\"D\").iplot(title=\"TARGET: %s, index: %d\"%(TARGET,f), hline=[THETA])\n\n #df[np.datetime64(\"2014-01-03\"):np.datetime64(\"2014-01-08\")]\n \n \nif for_graph():\n plot_y_true()\n\n\n# ## make_traintest\n# Divide los array 'examples', 'y' y 'data_index', según un porcentaje dado como trainset y testset. 
\n# Además recorta el trainset y testset para hacerlos divisibles por el BATCH_SIZE.\n\n# In[ ]:\n\n\n#Split train and test datasets\ndef make_traintest(internalConfig, Config, verbose=True):\n examples = internalConfig[\"examples\"]\n y = internalConfig[\"y_examples\"]\n date_index = internalConfig[\"dateExamples\"]\n y_len = internalConfig[\"y_len\"]\n BATCH_SIZE = Config[\"BATCH_SIZE\"]\n TRAINPCT = Config[\"TRAINPCT\"]\n TIMEDIST = Config[\"TIMEDIST\"]\n TIMESTEP = Config[\"TIMESTEP\"]\n SHUFFLE = Config[\"SHUFFLE\"]\n \n \n vprint = print if verbose else lambda *a, **k: None\n \n print(\"Making Trainset y Testset ...\")\n train_size = int( len(examples)*TRAINPCT )\n test_size = len(examples) - train_size\n \n ### shuffle examples\n if SHUFFLE:\n np.random.seed(123)\n shuffle_index = list(range(len(examples)))\n np.random.shuffle(shuffle_index)\n examples = examples[shuffle_index]\n date_index = date_index[shuffle_index]\n y = y[shuffle_index]\n \n np.random.seed(123) \n \n \n trainX, trainY = examples[0:train_size], y[0:train_size]\n dateTrain = date_index[0:train_size]\n \n testX, testY = examples[train_size:], y[train_size:]\n dateTest = date_index[train_size:]\n \n \n ###Cortar los dataset para hacerlos calzar con el BATCH_SIZE, necesario cuando se hace stateful\n ##vprint(\" len of Trainset before clip: \", len(trainX))\n ##vprint(\" len of Testset before clip: \", len(testX))\n ##\n ##cuttr = int(train_size/BATCH_SIZE)*BATCH_SIZE\n ##trainX = trainX[:cuttr]\n ##trainY = trainY[:cuttr]\n ##dateTrain = dateTrain[:cuttr]\n ##print(\" Discargind %d last examples in Trainset to match with batch size of %d\"%(train_size-cuttr, BATCH_SIZE))\n ##\n ##cuttst = int(test_size/BATCH_SIZE)*BATCH_SIZE\n ##testX = testX[:cuttst]\n ##testY = testY[:cuttst]\n ##dateTest = dateTest[:cuttst]\n ##print(\" Discargind %d last examples in Testset to match with batch size of %d\"%(test_size-cuttst, BATCH_SIZE))\n \n ###\n ### VALIDATION SPLIT\n ###\n train_size = int( len(trainX)*0.85 )\n validation_size = len(trainX) - train_size\n print(len(trainX),train_size,validation_size)\n \n tempX = trainX\n tempY = trainY\n tempDate = dateTrain\n \n trainX, trainY = tempX[0:train_size], tempY[0:train_size]\n dateTrain = tempDate[0:train_size]\n \n validX, validY = tempX[train_size:], tempY[train_size:]\n dateValid = tempDate[train_size:]\n \n #print(\"trainY.shape, \",trainY.shape)\n #print(\"validX.shape,\", validX.shape)\n #print(\"validY.shape, \",validY.shape)\n \n if TIMEDIST == True:\n trainY = trainY.reshape(-1, TIMESTEP, y_len )\n validY = validY.reshape(-1, TIMESTEP, y_len )\n testY = testY.reshape( -1, TIMESTEP, y_len )\n else:\n trainY = trainY[:,-1]\n validY = validY[:,-1]\n testY = testY[:,-1]\n \n \n \n vprint(\" trainX.shape, \", trainX.shape)\n vprint(\" validX.shape, \", validX.shape)\n vprint(\" testX.shape , \", testX.shape)\n \n vprint(\" trainY.shape, \", trainY.shape)\n vprint(\" validY.shape, \", validY.shape)\n vprint(\" testY.shape, \", testY.shape)\n \n d = {\n 'trainX': trainX,\n 'trainY': trainY,\n 'validX': validX,\n 'validY': validY,\n 'testX' : testX,\n 'testY' : testY,\n 'dateTrain' : dateTrain,\n 'dateValid' : dateValid,\n 'dateTest' : dateTest\n }\n \n return d\n #return trainX, trainY, validX, validY, testX, testY, dateTrain, dateValid, dateTest\n\n\n#join_index(dateTrain, trainY,\"train\").iplot()\n#join_index(dateTest, testY,\"test\").iplot()\n\n\n# ## make_folds_TVT\n\n# In[ ]:\n\n\ndef make_folds_TVT(internalConfig, Config):\n examples = internalConfig[\"examples\"]\n 
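# Expanding-window folds (built below): for each fold, training covers the\n    # Nov-Mar seasons from firstyear up to the one ending in year+1, validation\n    # is the season ending in year+2, and test the season ending in year+3.\n    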
y_examples = internalConfig[\"y_examples\"]\n dateExamples = internalConfig[\"dateExamples\"]\n LAGS = Config[\"TIMESTEP\"]\n STARTYEAR, ENDYEAR = Config[\"FILTER_YEARS\"]\n \n list_trainX = []\n list_trainY = []\n list_validX = []\n list_validY = []\n list_testX = []\n list_testY = []\n list_dateTrain = []\n list_dateValid = []\n list_dateTest = []\n \n inicio = \"{}-11-01\".format\n fin = \"{}-03-31\".format\n dates = dateExamples.astype(\"datetime64\")\n firstyear = int(str(dateExamples[0])[0:4])\n firstyear = STARTYEAR\n for year in range(firstyear, ENDYEAR-1):\n trainRange = [ np.datetime64(inicio(firstyear)), np.datetime64(fin(year+1)) ]\n validRange = [ np.datetime64(inicio(year+1)) , np.datetime64(fin(year+2)) ]\n testRange = [ np.datetime64(inicio(year+2)) , np.datetime64(fin(year+3)) ]\n #print(year)\n #print(trainRange)\n #print(validRange)\n #print(testRange)\n \n mask = (trainRange[0] <= dates) & (dates <= trainRange[1])\n trainX = examples[mask]\n trainY = y_examples[mask][:,-1]\n dateTrain = dateExamples[mask]\n #print(mask)\n \n mask = (validRange[0] <= dates) & (dates <= validRange[1])\n validX = examples[mask]\n validY = y_examples[mask][:,-1]\n dateValid = dateExamples[mask]\n #print(mask)\n \n mask = (testRange[0] <= dates) & (dates <= testRange[1])\n testX = examples[mask]\n testY = y_examples[mask][:,-1]\n dateTest = dateExamples[mask]\n #print(mask)\n \n if len(trainX) != 0 and len(validX) != 0 and len(testX) != 0:\n list_trainX.append(trainX)\n list_trainY.append(trainY)\n list_validX.append(validX)\n list_validY.append(validY)\n list_testX.append(testX)\n list_testY.append(testY)\n list_dateTrain.append(dateTrain)\n list_dateValid.append(dateValid)\n list_dateTest.append(dateTest)\n print(\"Usando {}-{}, lags: {}\".format(firstyear,year+2,LAGS), (len(trainX),len(validX),len(testX)) )\n else:\n print(\"{}-{} excluido, lags: {}, examples:\".format(firstyear,year+2,LAGS), (len(trainX),len(validX),len(testX)) )\n \n d = {\n 'list_trainX':list_trainX,\n 'list_trainY':list_trainY,\n 'list_validX':list_validX,\n 'list_validY':list_validY,\n 'list_testX' :list_testX,\n 'list_testY' :list_testY,\n 'list_dateTrain' :list_dateTrain,\n 'list_dateValid' :list_dateValid,\n 'list_dateTest' :list_dateTest\n }\n \n return d\n\n\n# ## create_filename\n\n# In[ ]:\n\n\ndef create_filename(internalConfig, Config):\n global MODELS_FOLDER\n if \"subFeatures\" in Config:\n features = Config[\"subFeatures\"]\n f_len = len(features)\n else:\n features = internalConfig[\"features\"]\n f_len = internalConfig[\"f_len\"]\n THETA = Config[\"THETA\"]\n FUTURE = Config[\"FUTURE\"]\n PAST = Config[\"PAST\"]\n TARGET = Config[\"TARGET\"]\n BATCH_SIZE = Config[\"BATCH_SIZE\"]\n STATION = Config[\"STATION\"]\n SEED = Config[\"SEED\"]\n if \"subTimeStep\" in Config:\n TIMESTEP = Config[\"subTimeStep\"]\n else:\n TIMESTEP = Config[\"TIMESTEP\"]\n TRAINPCT = Config[\"TRAINPCT\"]\n #OVERWRITE_MODEL = Config[\"OVERWRITE_MODEL\"]\n MODEL_NAME = Config[\"MODEL_NAME\"]\n LAYERS = Config[\"LAYERS\"]\n EPOCHS = Config[\"EPOCHS\"]\n DROP_RATE = Config[\"DROP_RATE\"]\n TIMEDIST = Config[\"TIMEDIST\"]\n SHUFFLE = Config[\"SHUFFLE\"]\n if \"QUANTILES\" in Config:\n strQUANTILES = \"-\" + str(Config[\"QUANTILES\"]).replace(\"0.\",\".\")\n else:\n strQUANTILES = \"\"\n \n if \"isLSTM\" in Config:\n isLSTM = \"-\"+str(Config[\"isLSTM\"])\n else:\n isLSTM = \"\"\n \n if \"qfileHash\" in internalConfig:\n qfileHash = \"-\"+str(internalConfig[\"qfileHash\"])\n else:\n qfileHash = \"\"\n if \"LOSS\" in Config:\n 
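# append the loss tag so models trained with different losses\n        # get distinct filenames on disk\n        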
MODEL_NAME += Config[\"LOSS\"]\n \n if Config[\"FOLDS_TVT\"]:\n foldmax = len(internalConfig[\"list_trainX\"])\n currentFold = internalConfig[\"fold\"]\n TVT = \"-TVT%sof%s\"%(currentFold,foldmax)\n else:\n TVT = \"\"\n \n if Config[\"FILTER_YEARS\"]:\n i,f = Config[\"FILTER_YEARS\"]\n fyears = \"-%d-%d\"%(i,f)\n else:\n fyears = \"\"\n \n IMPUT = Config[\"IMPUTATION\"]\n \n strFEATURES = str(features).replace(\"'\",\"\")\n strTD = \"TD\"*TIMEDIST\n FILE_NAME = \"./%s/%s-%.2f-(%d,%d,%d)%s-%s-%.3f-%s-%s-%d-(%s,%s)-%s-%s%sSHU%s%s%s%s%s%s%s.h5\"%(MODELS_FOLDER, MODEL_NAME, THETA, BATCH_SIZE, TIMESTEP, f_len, strQUANTILES, strFEATURES, TRAINPCT, TARGET, LAYERS, EPOCHS, str(PAST), FUTURE, strTD, DROP_RATE, isLSTM , SHUFFLE,TVT, fyears,STATION,SEED,IMPUT, qfileHash )\n FILE_NAME = FILE_NAME.replace(\" \",\"\")\n \n return FILE_NAME\n\n\n# # Metricas\n\n# ## RMSE y MAE\n\n# In[ ]:\n\n\ndef RMSE(ytrue, ypred, THETA=False, norm=False, ALL=False):\n ytrue = ytrue.reshape(-1)\n ypred = ypred.reshape(-1)\n if ALL == True:\n return {\"RMSE\" : RMSE(ytrue, ypred),\n \"RMSPE\" : RMSE(ytrue, ypred, norm=True),\n \"RMSEat\" : RMSE(ytrue, ypred, THETA=THETA),\n \"RMSPEat\" : RMSE(ytrue, ypred, THETA=THETA, norm=True)\n }\n \n if THETA is False:\n includes = np.ones(len(ytrue), dtype=bool)\n else:\n includes = ytrue >= THETA\n \n if includes.any():\n yt = ytrue[includes]\n yp = ypred[includes]\n e = yt - yp\n if norm == True:\n e = e/yt\n #if THETA == False:\n # #for i in range(len(e)):\n # # print(yt[i],yp[i],yt[i]-yp[i], np.abs(e[i]))\n # print(np.mean(np.abs(e)))\n #print(\"###\")\n e2 = e**2\n \n mean = np.mean(e2)\n #if THETA == False and norm == True:\n # for i in range(len(e2)):\n # if (np.abs(e[i]) >= 1):\n # print(yt[i],yp[i],yt[i]-yp[i], np.abs(e[i]),e2[i])\n # print(mean)\n return np.sqrt(mean)\n else:\n return np.nan\n\n\n# In[ ]:\n\n\nyt = np.array([1, 2, 3, 4, 1])\nyp = np.array([1, 2, 1, 1, 1])\n\nprint(np.mean(yt))\nprint(RMSE(yt,yp))\nprint(RMSE(yt,yp, norm=True))\nprint(np.sqrt( ( ((1-1)/1)**2 + ((2-2)/2)**2 + ((3-1)/3)**2 + ((4-1)/4)**2 + ((1-1)/1)**2 )/5 ) )\nprint(RMSE(yt,yp, ALL= True, THETA = 3))\n\n\n# In[ ]:\n\n\ndef MAE(ytrue, ypred, THETA=False, norm=False, ALL=False):\n if ALL == True:\n return {\"MAE\" : MAE(ytrue, ypred),\n \"MAPE\" : MAE(ytrue, ypred, norm=True),\n \"MAEat\" : MAE(ytrue, ypred, THETA=THETA),\n \"MAPEat\" : MAE(ytrue, ypred, THETA=THETA, norm=True)\n }\n \n if THETA is False:\n includes = np.ones(len(ytrue), dtype=bool)\n else:\n includes = ytrue >= THETA\n \n if includes.any():\n yt = ytrue[includes]\n yp = ypred[includes]\n e = yt - yp\n if norm == True:\n e = e/yt\n eabs = np.abs(e)\n mean = np.mean(eabs)\n return mean\n else:\n return np.nan\n\n\n# ## Quantile Metrics & Interval Coverage\n\n# In[ ]:\n\n\n#ytrue = actual\n#ypred = quantile_values\n#quantile_probs -> real quantiles\ndef quantile_metrics(actual, quantile_probs, quantile_values):\n actual = actual.reshape(-1)\n quantile_losses = []\n \n for idx in range(len(quantile_probs)):\n #print(actual.shape)\n #print(quantile_values.shape)\n #print(quantile_values[:,idx].shape)\n res = actual - quantile_values[:,idx]\n #print(res.shape)\n q = quantile_probs[idx]\n t=np.maximum(q*res, (q-1.0)*res)\n #print(t.shape)\n #print(\"######\")\n \n qloss = np.mean(t) #pinball loss\n #if q == 0.9:\n # print(actual[-15:])\n # print(quantile_values[:,idx][-15:])\n # print(res[-15:])\n # print(t[-15:])\n quantile_losses.append(qloss)\n\n quantile_losses = np.array(quantile_losses)\n return np.mean(quantile_losses), 
quantile_losses\n\n\n# In[ ]:\n\n\ndef IC_metrics(inf_limit, sup_limit, nominal_sig, true_values, base_prediction=0.0):\n    inf_limit = inf_limit.flatten()\n    sup_limit = sup_limit.flatten()\n    true_values = true_values.flatten()\n    base_prediction = base_prediction.flatten()\n    \n    EPS_CONST = np.finfo(float).eps\n    rho = nominal_sig\n    cov_prob = 0.0\n    \n    excess = []\n    \n    for idx in range(len(true_values)):\n        one_excess = 0.0\n        if true_values[idx] > sup_limit[idx]:\n            one_excess = (2.0/rho)*(true_values[idx] - sup_limit[idx])\n        elif true_values[idx] < inf_limit[idx]:\n            one_excess = (2.0/rho)*(inf_limit[idx] - true_values[idx])\n        else: \n            cov_prob += 1.0\n\n        excess.append(one_excess)\n\n    excess = np.array(excess)\n    MIS = sup_limit - inf_limit + excess #mean interval score\n    MSIS = np.divide(MIS, np.abs(true_values - base_prediction) + EPS_CONST) #mean scaled interval score\n    #MSIS = np.divide(MIS, np.abs(true_values) + EPS_CONST) #mean scaled interval score\n    length = sup_limit - inf_limit\n\n    #return np.mean(MIS), np.mean(MSIS), np.mean(cov_prob), np.mean(length)\n    return np.mean(MIS), np.mean(MSIS), cov_prob/len(true_values), np.mean(length)\n\n\n# In[ ]:\n\n\n#def RMSEat(theta, ytrue, ypred):\n#    includes = ytrue >= theta\n#    if includes.any():\n#        return math.sqrt(mean_squared_error(ytrue[includes], ypred[includes] ))\n#    else:\n#        return np.nan\n\n\n# In[ ]:\n\n\n#def MAEat(theta, ytrue, ypred):\n#    includes = ytrue >= theta\n#    if includes.any():\n#        return mean_absolute_error(ytrue[includes], ypred[includes] )\n#    else:\n#        return np.nan\n\n\n# # Losses\n# ## LossAtTHETA\n\n# In[ ]:\n\n\ndef lossAtTHETA(scaled_THETA, ytrue, ypred):\n    loss = 0\n    e = ytrue - ypred\n    loss = K.sum(K.square( e*K.maximum( K.sign(ytrue-scaled_THETA), 0) ), axis=-1)\n\n    return loss\n\n\n# In[ ]:\n\n\nnp.sign(61-(61-K.epsilon()))\n\n\n# In[ ]:\n\n\n# Loss Function\ndef custom_loss(ytrue, ypred):\n    loss = 0\n    #for i in range(ylen):\n    e = ytrue - ypred\n    loss += K.mean(K.square( e )*K.clip( ytrue-60, 0, 1 ), axis=-1)\n    \n    #loss = K.mean(K.square(ytrue[:, 0]-ypred[:, 0]), axis=-1)\n    \n    #for k in range(len(quantiles)):\n    #    q = quantiles[k]\n    #    e = (ytrue[:, ylen+k]-ypred[:, ylen+k])\n    #    loss += K.mean(q*e + K.clip(-e, K.epsilon(), np.inf), axis=-1)\n    return loss\n\n\n# ## loss2YatTHETA\n\n# In[ ]:\n\n\ndef loss2YatTHETA(scaled_THETA, ytrue, ypred):\n    scaled_THETA -= K.epsilon()\n    e1 = ytrue[:,0] - ypred[:,0]\n    e2 = ytrue[:,1] - ypred[:,1]\n    \n    # <= THETA\n    #loss1 = K.mean(K.square(ytrue[:,0]- ypred[:,0]))\n    loss1 = K.sum(K.square( e1*(1-K.maximum( K.sign(ytrue[:,0]-scaled_THETA), 0)) ), axis=-1)\n    loss1 = loss1/K.maximum(1., K.sum(1-K.maximum( K.sign(ytrue[:,0]-scaled_THETA), 0)))\n    \n    # > THETA\n    loss2 = K.sum(K.square( e2*(K.maximum( K.sign(ytrue[:,1]-scaled_THETA), 0)) ), axis=-1)\n    loss2 = loss2/K.maximum(1., K.sum(K.maximum( K.sign(ytrue[:,1]-scaled_THETA), 0)))\n    \n    #mean\n    loss3 = 0\n    #loss3 = K.mean(K.square(ytrue[:,2]- ypred[:,2]))\n    \n    return loss1 + loss2 + loss3\n\n\n# In[ ]:\n\n\ndef loss2YatMEAN(scaled_THETA, ytrue, ypred):\n    scaled_THETA -= K.epsilon()\n    e1 = ytrue[:,0] - ypred[:,0]\n    e2 = ytrue[:,1] - ypred[:,1]\n    \n    # <= THETA\n    #loss1 = K.mean(K.square(ytrue[:,0]- ypred[:,0]))\n    loss1 = K.sum(K.square( e1*(1-K.maximum( K.sign(ytrue[:,0]-scaled_THETA), 0)) ), axis=-1)\n    loss1 = loss1/K.maximum(1., K.sum(1-K.maximum( K.sign(ytrue[:,0]-scaled_THETA), 0)))\n    \n    # > THETA\n    loss2 = K.sum(K.square( e2*(K.maximum( K.sign(ytrue[:,1]-scaled_THETA), 0)) ), axis=-1)\n    loss2 = loss2/K.maximum(1., K.sum(K.maximum( 
K.sign(ytrue[:,1]-scaled_THETA), 0)))\n \n #mean\n loss3 = 0\n #loss3 = K.mean(K.square(ytrue[:,2]- ypred[:,2]))\n \n return loss1 + loss2 + loss3\n\n\n# # Clasificador\n\n# ## ClassifierLSTM\n\n# In[ ]:\n\n\ndef ClassifierLSTM(Config): #, AGREGADOS, TARGET, THETA, FUTURE, PAST, FEATURES, CUT, BAN, TIMESTEP, OVERLAP, BATCH_SIZE, TRAINPCT, OVERWRITE_MODEL, MODEL_NAME, LAYERS, EPOCHS, TIMEDIST):\n np.random.seed(123)\n \n\n TIMESTEP = Config['TIMESTEP']\n TIMEDIST = Config['TIMEDIST']\n Config['SHIFT'] = Config['FUTURE'] * -1\n \n scalers = {}\n ic = {\"scalers\": scalers}\n \n complete_dataset, ylabels, Yscaler, h24scaler = import_merge_and_scale(Config, verbose=False)\n ic[\"complete_dataset\"] = complete_dataset\n ic[\"ylabels\"] = ylabels\n scalers['Yscaler'] = Yscaler\n scalers[\"h24scaler\"] = h24scaler\n \n y_len = len(ic[\"ylabels\"])\n ic[\"y_len\"] = y_len\n \n # TEST - Agregando datos del día siguiente - Son lineas COMENTABLES\n #dataset = pd.concat([dataset[dataset.columns[:-1]], dataset[[\"TEMP\",\"UVB\"]].shift(-1), dataset[dataset.columns[-1:]]], axis=1)\n #dataset.columns = dataset.columns[:-3].tolist() + [\"TEMP2\",\"UVB2\",\"y\"]\n #dataset[[\"TEMP\",\"UVB\"]] = dataset[[\"TEMP\",\"UVB\"]].shift(-1)\n \n data, features = select_features(ic, Config)\n ic[\"data\"] = data\n ic[\"features\"] = features\n \n ic[\"f_len\"] = len(ic[\"features\"])\n \n ic[\"secuencias\"] = obtener_secuencias(ic)\n \n examples, y_examples, dateExamples = make_examples(ic, Config, verbose=False)\n ic[\"examples\"] = examples\n ic[\"y_examples\"] = y_examples\n ic[\"dateExamples\"] = dateExamples\n \n trainX, trainY, testX, testY, dateTrain, dateTest = make_traintest(ic, Config, verbose=False)\n ic[\"trainX\"] = trainX\n ic[\"trainY\"] = trainY\n ic[\"testX\"] = testX\n ic[\"testY\"] = testY\n ic[\"dateTrain\"] = dateTrain\n ic[\"dateTest\"] = dateTest\n \n \n \n \n print(\"trainY.shape, \",ic[\"trainY\"].shape)\n if TIMEDIST == True:\n ic[\"trainY\"] = ic[\"trainY\"].reshape(-1, TIMESTEP, y_len )\n ic[\"testY\"] = ic[\"testY\"].reshape(-1, TIMESTEP, y_len )\n else:\n ic[\"trainY\"] = ic[\"trainY\"][:,-1]\n ic[\"testY\"] = ic[\"testY\"][:,-1]\n print(\"trainY.shape, \",ic[\"trainY\"].shape)\n \n EXAMPLES = Config[\"USE_EXAMPLES\"]\n if EXAMPLES != None:\n print(\"jiji\",ic[\"trainY\"].shape)\n ic[\"trainY\"] = EXAMPLES[\"trainY\"].copy()\n ic[\"testY\"] = EXAMPLES[\"testY\"].copy()\n print(\"jiji\",ic[\"trainY\"].shape)\n \n \n cModel, file_name = ClassModel(ic, Config)\n ic[\"model\"] = cModel\n ic[\"file_name\"] = file_name\n \n trainPred, testPred = ClassPredict( ic, Config)\n ic[\"trainPred\"] = trainPred\n ic[\"testPred\"] = testPred\n \n print(\"classif,\", np.sum(trainPred), np.sum(ic[\"trainY\"]))\n print(\"classif,\", np.sum(testPred), np.sum(ic[\"testY\"]))\n \n #scalers = {\"Yscaler\":Yscaler,\"h24scaler\":h24scaler}\n #d = {\"data\":data, \"trainX\":trainX, \"trainY\":trainY, \"testX\":testX, \"testY\":testY, \"dateTrain\":dateTrain, \"dateTest\":dateTest,\n #\"trainPred\":ftrp, \"trainYtrue\":ftry, \"testPred\":ftep, \"testYtrue\":ftey,\n # \"modelName\":MODEL_NAME, \"scalers\":scalers}\n return ic\n\n\n# ## ClassPredict\n\n# In[ ]:\n\n\ndef ClassPredict(internalConfig, Config):\n np.random.seed(123)\n set_random_seed(2)\n cModel = internalConfig[\"model\"]\n trainX = internalConfig[\"trainX\"]\n trainY = internalConfig[\"trainY\"]\n testX = internalConfig[\"testX\"]\n testY = internalConfig[\"testY\"]\n \n BATCH_SIZE = Config[\"BATCH_SIZE\"]\n TIMEDIST = Config[\"TIMEDIST\"]\n 
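# PRED_TYPE='hard' thresholds the sigmoid outputs at the mean train\n    # prediction (see below) to get 0/1 labels; 'soft' keeps the raw scores.\n    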
PRED_TYPE = Config[\"PRED_TYPE\"]\n \n # Predictions\n print(\"Classifier Prediction ...\")\n trainPred = cModel.predict(trainX, batch_size = BATCH_SIZE)\n testPred = cModel.predict(testX, batch_size = BATCH_SIZE)\n \n # if TimeDistributed\n if TIMEDIST == True:\n pass\n else:\n print(trainPred.shape)\n print(trainY.shape)\n trainPred = trainPred[:, 0, None]\n trainY = trainY[:, 0, None]\n testPred = testPred[:, 0, None]\n testY = testY[:, 0, None]\n print(trainPred.shape)\n print(trainY.shape)\n \n \n print(\"classTrainPred\", np.sum(trainPred), np.sum(trainY))\n print(\"classTestPred\", np.sum(testPred), np.sum(testY))\n #for x,y in zip(trainPred, trainY):\n # print((x>= np.mean(trainPred))*1, y,x )\n ####trainPred = np.round(trainPred)\n ####testPred = np.round(testPred)\n if PRED_TYPE == \"hard\":\n thetita = 0.5 #np.mean(trainPred)\n thetita = np.mean(trainPred)\n trainPred = (trainPred >= thetita)*1\n testPred = (testPred >= thetita)*1\n elif PRED_TYPE == \"soft\":\n # se mantiene igual\n pass\n print(\"classTrainPred\", np.sum(trainPred), np.sum(trainY))\n print(\"classTestPred\", np.sum(testPred), np.sum(testY))\n \n #print(trainY)\n #print(trainPred)\n # Invert Predictions\n #trainPred = Yscaler.inverse_transform(trainPred)\n #trainYinv = Yscaler.inverse_transform(trainY)\n #testPred = Yscaler.inverse_transform(testPred)\n #testYinv = Yscaler.inverse_transform(testY)\n \n # calculate root mean squared error\n if PRED_TYPE == 'hard':\n print(\"Calculando Accuracy\")\n trainScore = accuracy_score(trainY, trainPred)\n print(' Train Accuracy: %.2f%%' % (trainScore*100))\n testScore = accuracy_score(testY, testPred)\n print(' Test Accuracy: %.2f%%' % (testScore*100))\n elif PRED_TYPE == 'soft':\n print(\"Calculando Accuracy\")\n print(\"Not for PRED_TYPE='soft'\")\n \n \n #join_index(trainPred,)\n return trainPred, testPred\n \n \n \n \n\n\n# ## ClassModel\n\n# In[ ]:\n\n\ndef ClassModel(internalConfig, Config ):\n np.random.seed(123)\n set_random_seed(2)\n \n trainX = internalConfig[\"trainX\"]\n trainY = internalConfig[\"trainY\"]\n #ylen = internalConfig[\"ylen\"]\n features = internalConfig[\"features\"]\n f_len = internalConfig[\"f_len\"]\n #THETA = Config[\"THETA\"]\n #FUTURE = Config[\"FUTURE\"]\n #PAST = Config[\"PAST\"]\n #TARGET = Config[\"TARGET\"]\n #TRAINPCT = Config[\"TRAINPCT\"]\n #MODEL_NAME = Config[\"MODEL_NAME\"]\n #TIMEDIST = Config[\"TIMEDIST\"]\n BATCH_SIZE = Config[\"BATCH_SIZE\"]\n TIMESTEP = Config[\"TIMESTEP\"]\n OVERWRITE_MODEL = Config[\"OVERWRITE_MODEL\"]\n LAYERS = Config[\"LAYERS\"]\n EPOCHS = Config[\"EPOCHS\"]\n DROP_RATE = Config[\"DROP_RATE\"]\n \n \n print(\"in ClassModel ...\")\n print(\" trainX.shape, \", trainX.shape)\n print(\" trainY.shape\", trainY.shape)\n \n #strFEATURES = str(features).replace(\"'\",\"\")\n #strTD = \"TD\"*TIMEDIST\n #FILE_NAME = \"./%s/%s-%.2f-(%d,%d,%d)-%s-%.3f-%s-%s-%d-(%s,%s)-%s.h5\"%(MODELS_FOLDER, MODEL_NAME, THETA, BATCH_SIZE, TIMESTEP, f_len, strFEATURES, TRAINPCT, TARGET, LAYERS, EPOCHS, str(PAST), FUTURE, strTD)\n #FILE_NAME = FILE_NAME.replace(\" \",\"\")\n FILE_NAME = create_filename(internalConfig, Config)\n \n \n try:\n if OVERWRITE_MODEL == False:\n print(\"Loading Model...\")\n cModel = load_model(FILE_NAME)\n print(FILE_NAME + \" loaded =)\")\n print(\"LISTO\")\n else:\n print(\"Reentrenando \"+ FILE_NAME)\n load_model(\"noexisto_nijamas_existire.h5\")\n except:\n print(\"batch_input_shape:(%d,%d,%d)\"%(BATCH_SIZE,TIMESTEP,f_len))\n cModel = Sequential()\n for Neurons in LAYERS[:-1]:\n 
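# every LSTM except the last returns full sequences so layers can be\n        # stacked; the final LSTM below emits only the last state for the\n        # sigmoid head\n        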
cModel.add(LSTM(Neurons, activation=\"relu\", input_shape=(TIMESTEP, f_len), return_sequences=True))\n cModel.add(Dropout(rate=DROP_RATE))\n cModel.add(LSTM(LAYERS[-1], activation=\"relu\", input_shape=(TIMESTEP, f_len), return_sequences=False))\n cModel.add(Dropout(rate=DROP_RATE))\n cModel.add(Dense( 1, activation='sigmoid'))\n cModel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n #cModel.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])\n cModel.fit( trainX, trainY, epochs=EPOCHS, batch_size=BATCH_SIZE )\n \n \n cModel.save(FILE_NAME)\n print(FILE_NAME + \" saved. = )\")\n \n return cModel, FILE_NAME\n\n\n# ## Pruebas\n\n# In[ ]:\n\n\nSTATION = \"Las_Condes\"\nClassifierLSTMconfig={\n # import_merge_and_scale()\n \"SCALER\" : preprocessing.StandardScaler,\n \"AGREGADOS\" : [],#[\"O3\",\"TEMP\",\"WS\",\"RH\"], #[\"ALL\"] #Horas de los maximos que se quieren agregar.\n \"PRECALC\" : precalcular_agregados(STATION),\n \"TARGET\" : \"EC\",\n \"THETA\" : 61,\n \"FUTURE\" : 1,\n \"PAST\" : False,\n \n # select_features()\n \"FIXED_FEATURES\" : [], # empty list means that the CUT will be used\n \"CUT\" : 0.26,\n \"BAN\" : [],#[\"countEC\"],#[\"countEC\", \"EC\"],# []\n \n #make_examples()\n \"TIMESTEP\" : 14,\n \"OVERLAP\" : True,\n \n #make_traintest()\n \"SHUFFLE\" : True,\n \"BATCH_SIZE\" : 16,\n \"TRAINPCT\" : 0.85,\n \n #myLSTM()\n \"OVERWRITE_MODEL\" : False,\n \"MODEL_NAME\" : \"ClassifierLSTMModel\",\n \"LAYERS\" : [4,4],\n \"EPOCHS\" : 100,\n \"DROP_RATE\" : 0.4,\n \"TIMEDIST\" : False, #SIN IMPLEMENTAR\n }\n#ClassifierOutput = ClassifierLSTM( ClassifierLSTMconfig )\n\n\n# In[ ]:\n\n\n#co = ClassifierOutput\n#len(co[\"trainY\"])\n\n\n# # LSTM\n# LSTM con función de pérdida de 'mean_squared_error'.\n\n# ## myLSTM\n\n# In[ ]:\n\n\ndef myLSTM(Config): #, AGREGADOS, TARGET, PRECALC, THETA, FUTURE, PAST, FEATURES, CUT, BAN, TIMESTEP, OVERLAP, BATCH_SIZE, TRAINPCT, OVERWRITE_MODEL, MODEL_NAME, LAYERS, EPOCHS, TIMEDIST, Yx):\n #np.random.seed(123)\n #set_random_seed(2)\n \n FOLDS_TVT = Config[\"FOLDS_TVT\"]\n TIMESTEP = Config['TIMESTEP']\n #TIMEDIST = Config['TIMEDIST']\n Config['SHIFT'] = Config['FUTURE'] * -1\n \n #SHIFT = FUTURE*-1\n #PRECALC = precalcular_agregados()\n \n \n scalers = {}\n ic = {\"scalers\": scalers}\n \n complete_dataset, ylabels, Yscaler, h24scaler = import_merge_and_scale(Config, verbose=False)\n ic[\"complete_dataset\"] = complete_dataset\n ic[\"ylabels\"] = ylabels\n scalers['Yscaler'] = Yscaler\n scalers[\"h24scaler\"] = h24scaler\n \n # TEST - Agregando datos del día siguiente - Son lineas COMENTABLES\n #dataset = pd.concat([dataset[dataset.columns[:-1]], dataset[[\"TEMP\",\"UVB\"]].shift(-1), dataset[dataset.columns[-1:]]], axis=1)\n #dataset.columns = dataset.columns[:-3].tolist() + [\"TEMP2\",\"UVB2\",\"y\"]\n #dataset[[\"TEMP\",\"UVB\"]] = dataset[[\"TEMP\",\"UVB\"]].shift(-1)\n \n y_len = len(ic[\"ylabels\"])\n ic[\"y_len\"] = y_len\n \n \n data, features = select_features(ic, Config)\n ic[\"data\"] = data\n ic[\"features\"] = features\n \n ic[\"f_len\"] = len(ic[\"features\"])\n \n ic[\"secuencias\"] = obtener_secuencias(ic)\n \n #data, FEATURES = select_features(dataset, ylabels, FEATURES, CUT, BAN)\n #F_LEN = len(FEATURES)\n \n #strFEATURES = str(FEATURES).replace(\"'\",\"\")\n #strTD = \"TD\"*TIMEDIST\n #FILE_NAME = \"./%s/%s-(%d,%d,%d)-%s-%.3f-%s-%s-%d-(%s,%s)-%s.h5\"%(MODELS_FOLDER,MODEL_NAME, BATCH_SIZE, TIMESTEP, F_LEN, strFEATURES, TRAINPCT, TARGET, LAYERS, EPOCHS, str(PAST), FUTURE, strTD)\n 
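# legacy filename building kept for reference; FILE_NAME is presumably\n    # assembled by create_filename() now (see the create_filename cell above)\n    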
#FILE_NAME = FILE_NAME.replace(\" \",\"\")\n \n \n #SECUENCIAS = obtener_secuencias(data)\n #examples, y, date_index = make_examples(data, SECUENCIAS, ylabels, TIMESTEP, OVERLAP, verbose=False)\n #trainX, trainY, testX, testY, dateTrain, dateTest = make_traintest(examples, y, date_index, BATCH_SIZE, TRAINPCT, verbose=False)\n \n examples, y_examples, dateExamples = make_examples(ic, Config, verbose=False)\n ic[\"examples\"] = examples\n ic[\"y_examples\"] = y_examples\n ic[\"dateExamples\"] = dateExamples\n \n if FOLDS_TVT == False:\n tvtDict = make_traintest(ic, Config, verbose=False)\n for key in tvtDict:\n ic[key] = tvtDict[key]\n #trainX, trainY, validX, validY, testX, testY, dateTrain, dateValid, dateTest = make_traintest(ic, Config, verbose=False)\n #ic[\"trainX\"] = trainX\n #ic[\"trainY\"] = trainY\n #ic[\"validX\"] = validX\n #ic[\"validY\"] = validY\n #ic[\"testX\"] = testX\n #ic[\"testY\"] = testY\n #ic[\"dateTrain\"] = dateTrain\n #ic[\"dateValid\"] = dateValid\n #ic[\"dateTest\"] = dateTest\n else:\n list_tvtDict = make_folds_TVT(ic, Config)\n for key in list_tvtDict:\n ic[key] = list_tvtDict[key]\n \n list_models , file_name = myLSTMModel(ic, Config)\n \n if FOLDS_TVT == False:\n ic[\"model\"] = list_models[0]\n ic[\"list_models\"] = [ ic[\"model\"] ]\n else:\n ic[\"list_models\"] = list_models\n \n ic[\"file_name\"] = file_name #solo es el ultimo modelo\n \n \n \n \n detail = myLSTMPredict(ic, Config)\n ic[\"detail\"] = detail\n #trainPred, validPred, testPred = myLSTMPredict(ic, Config)\n #ic[\"trainPred\"] = trainPred\n #ic[\"validPred\"] = validPred\n #ic[\"testPred\"] = testPred\n \n return ic\n\n\n# ## myLSTMPredict\n\n# In[ ]:\n\n\ndef myLSTMPredict(internalConfig, Config):\n TIMEDIST = Config[\"TIMEDIST\"]\n BATCH_SIZE = Config[\"BATCH_SIZE\"]\n THETA = Config[\"THETA\"]\n TARGET = Config[\"TARGET\"]\n FOLDS_TVT = Config[\"FOLDS_TVT\"]\n GRAPH = Config[\"GRAPH\"]\n moreTHETA = Config[\"moreTHETA\"]\n Yx = Config[\"Yx\"]\n Yscaler = internalConfig[\"scalers\"][\"Yscaler\"]\n list_models = internalConfig[\"list_models\"]\n \n if FOLDS_TVT == False:\n list_trainX = [ internalConfig[ \"trainX\"] ]\n list_trainY = [ internalConfig[ \"trainY\"] ]\n list_validX = [ internalConfig[ \"validX\"] ]\n list_validY = [ internalConfig[ \"validY\"] ]\n list_testX = [ internalConfig[ \"testX\" ] ]\n list_testY = [ internalConfig[ \"testY\" ] ]\n list_dateTrain = [ internalConfig[\"dateTrain\"] ]\n list_dateValid = [ internalConfig[\"dateValid\"] ]\n list_dateTest = [ internalConfig[ \"dateTest\"] ]\n \n else:\n list_trainX = internalConfig[ \"list_trainX\"]\n list_trainY = internalConfig[ \"list_trainY\"]\n list_validX = internalConfig[ \"list_validX\"]\n list_validY = internalConfig[ \"list_validY\"]\n list_testX = internalConfig[ \"list_testX\"]\n list_testY = internalConfig[ \"list_testY\"]\n list_dateTrain = internalConfig[\"list_dateTrain\"]\n list_dateValid = internalConfig[\"list_dateValid\"]\n list_dateTest = internalConfig[ \"list_dateTest\"]\n \n \n # RMSE\n trainRMSE = []\n validRMSE = []\n testRMSE = []\n \n # MAE\n trainMAE = []\n validMAE = []\n testMAE = []\n \n scores_detail = {\n \"train\": {'quantity':[]},\n \"valid\": {'quantity':[]},\n \"test\" : {'quantity':[]},\n }\n \n list_df = []\n \n print(\"Calculando Predicciones\")\n\n for fold in range(0,len(list_trainX)):\n trainX = list_trainX[fold]\n trainY = list_trainY[fold]\n validX = list_validX[fold]\n validY = list_validY[fold]\n testX = list_testX[fold]\n testY = list_testY[fold]\n dateTrain = 
list_dateTrain[fold]\n dateValid = list_dateValid[fold]\n dateTest = list_dateTest[fold]\n \n \n \n \n \n classicModel = list_models[fold]\n \n \n # Predictions\n \n trainPred = classicModel.predict(trainX)\n classicModel.reset_states()\n validPred = classicModel.predict(validX)\n classicModel.reset_states()\n testPred = classicModel.predict(testX)\n classicModel.reset_states()\n \n #TimeDistributed\n T = Yx\n if TIMEDIST == True:\n #print(trainPred.shape)\n #print(trainY.shape)\n trainPred = trainPred[:,-1, T].reshape(-1, 1)\n trainY = trainY[:,-1,0].reshape(-1, 1)\n validPred = validPred[:,-1, T].reshape(-1, 1)\n validY = validY[:,-1,0].reshape(-1, 1)\n testPred = testPred[:,-1, T].reshape(-1, 1)\n testY = testY[:,-1,0].reshape(-1, 1)\n #print(trainPred.shape)\n #print(trainPred.shape)\n else:\n #print(trainPred.shape)\n #print(trainY.shape)\n trainPred = trainPred[:, T, None]\n trainY = trainY[:, T, None]\n validPred = validPred[:, T, None]\n validY = validY[:, T, None]\n testPred = testPred[:, T, None]\n testY = testY[:, T, None]\n #print(trainPred.shape)\n #print(trainY.shape)\n \n # Invert Predictions\n trainPred = Yscaler.inverse_transform(trainPred)\n trainYinv = Yscaler.inverse_transform(trainY)\n \n validPred = Yscaler.inverse_transform(validPred)\n validYinv = Yscaler.inverse_transform(validY)\n \n testPred = Yscaler.inverse_transform(testPred)\n testYinv = Yscaler.inverse_transform(testY)\n \n # stats\n num_train = len(trainX)\n num_valid = len(validX)\n num_test = len(testX)\n \n num_THETA_train = np.sum(trainYinv >= THETA)\n num_THETA_valid = np.sum(validYinv >= THETA)\n num_THETA_test = np.sum(testYinv >= THETA)\n \n scores_detail[\"train\"]['quantity'].append( (num_train, num_THETA_train) )\n scores_detail[\"valid\"]['quantity'].append( (num_valid, num_THETA_valid) )\n scores_detail[\"test\"]['quantity'].append( (num_test, num_THETA_test) )\n \n \n # calculate root mean squared error\n yt, yp = trainYinv, trainPred\n scores = RMSE(yt, yp, THETA=THETA, ALL=True)\n scores = [scores[\"RMSE\"], scores[\"RMSPE\"], scores[\"RMSEat\"], scores[\"RMSPEat\"] ]\n for t in moreTHETA:\n scores.append( RMSE(yt, yp, THETA=t) )\n scores.append( RMSE(yt, yp, THETA=t, norm=True) )\n trainRMSE.append( scores )\n yt, yp = validYinv, validPred\n scores = RMSE(yt, yp, THETA=THETA, ALL=True)\n scores = [scores[\"RMSE\"], scores[\"RMSPE\"], scores[\"RMSEat\"], scores[\"RMSPEat\"] ]\n for t in moreTHETA:\n scores.append( RMSE(yt, yp, THETA=t) )\n scores.append( RMSE(yt, yp, THETA=t, norm=True) )\n validRMSE.append( scores )\n yt, yp = testYinv, testPred\n scores = RMSE(yt, yp, THETA=THETA, ALL=True)\n scores = [scores[\"RMSE\"], scores[\"RMSPE\"], scores[\"RMSEat\"], scores[\"RMSPEat\"] ]\n for t in moreTHETA:\n scores.append( RMSE(yt, yp, THETA=t) )\n scores.append( RMSE(yt, yp, THETA=t, norm=True) )\n testRMSE.append( scores )\n a,b = trainRMSE[-1][0:2]\n c,d = validRMSE[-1][0:2]\n e,f = testRMSE[-1][0:2]\n print(\"fold %s score (RMSE,RMSPE): (%.2f, %.2f) (%.2f, %.2f) (%.2f, %.2f)\"%(fold, a,b,c,d,e,f))\n \n #scores = RMSE(validYinv, validPred, THETA=THETA, ALL=True)\n #validRMSE.append( [scores[\"RMSE\"], scores[\"RMSPE\"], scores[\"RMSEat\"], scores[\"RMSPEat\"]] )\n #scores = RMSE(testYinv, testPred, THETA=THETA, ALL=True)\n #testRMSE.append( [scores[\"RMSE\"], scores[\"RMSPE\"], scores[\"RMSEat\"], scores[\"RMSPEat\"]] )\n \n \n #trainScore = RMSE(trainYinv, trainPred, 61)\n #trainRMSE[-1].append( trainScore )\n #validScore = RMSE(validYinv, validPred, 61)\n #validRMSE[-1].append( validScore 
)\n #testScore = RMSE(testYinv, testPred, 61)\n #testRMSE[-1].append( testScore )\n \n \n #MAE\n yt, yp = trainYinv, trainPred\n scores = MAE(yt, yp, THETA=THETA, ALL=True)\n scores = [scores[\"MAE\"], scores[\"MAPE\"], scores[\"MAEat\"], scores[\"MAPEat\"] ]\n for t in moreTHETA:\n scores.append( MAE(yt, yp, THETA=t) )\n scores.append( MAE(yt, yp, THETA=t, norm=True) )\n trainMAE.append( scores )\n #scores = MAE(trainYinv, trainPred, THETA=THETA, ALL=True)\n #trainMAE.append( [scores[\"MAE\"], scores[\"MAPE\"], scores[\"MAEat\"], scores[\"MAPEat\"]] )\n yt, yp = validYinv, validPred\n scores = MAE(yt, yp, THETA=THETA, ALL=True)\n scores = [scores[\"MAE\"], scores[\"MAPE\"], scores[\"MAEat\"], scores[\"MAPEat\"] ]\n for t in moreTHETA:\n scores.append( MAE(yt, yp, THETA=t) )\n scores.append( MAE(yt, yp, THETA=t, norm=True) )\n validMAE.append( scores )\n #scores = MAE(validYinv, validPred, THETA=THETA, ALL=True)\n #validMAE.append( [scores[\"MAE\"], scores[\"MAPE\"], scores[\"MAEat\"], scores[\"MAPEat\"]] )\n yt, yp = testYinv, testPred\n scores = MAE(yt, yp, THETA=THETA, ALL=True)\n scores = [scores[\"MAE\"], scores[\"MAPE\"], scores[\"MAEat\"], scores[\"MAPEat\"] ]\n for t in moreTHETA:\n scores.append( MAE(yt, yp, THETA=t) )\n scores.append( MAE(yt, yp, THETA=t, norm=True) )\n testMAE.append( scores )\n #scores = MAE(testYinv, testPred, THETA=THETA, ALL=True)\n #testMAE.append( [scores[\"MAE\"], scores[\"MAPE\"], scores[\"MAEat\"], scores[\"MAPEat\"]] )\n \n\n if GRAPH == True:\n print(\"Graficando...\")\n df1 = join_index(dateTrain,trainYinv, \"Y\")\n df2 = join_index(dateTrain,trainPred, \"f train\")\n \n df3 = join_index(dateValid,validYinv, \"Y\")\n df4 = join_index(dateValid,validPred, \"f validation\")\n \n df5 = join_index(dateTest,testYinv, \"Y\")\n df6 = join_index(dateTest,testPred, \"f test\")\n \n ydf = pd.concat([df1, df3, df5], axis=0)\n df = pd.concat( [ydf, df4, df2, df6] ,axis=1)\n \n #traindf = pd.concat([df1,df2], axis=1)\n #testdf = pd.concat([df3,df4], axis=1)\n \n #df = pd.concat([traindf,testdf])\n \n #fecha_test = testdf.index[0]\n #df.asfreq(\"D\").iplot(title = \"%s - %s\"%(TARGET, LAYERS), vline=[fecha_test], hline=[THETA])\n df.asfreq(\"D\").iplot(title = \"%s\"%(TARGET), hline=[THETA, df[\"Y\"].mean()], vline=[dateValid[0],dateTest[0]])\n \n df = df.asfreq(\"D\")\n \n list_df.append(df)\n \n print(\"### LSTM ###\")\n \n labels = [\"RMSE\", \"RMSPE\", \"RMSEat%s\"%THETA, \"RMSPEat%s\"%THETA]\n for t in moreTHETA:\n labels.append(\"RMSEat%s\"%t)\n labels.append(\"RMSPEat%s\"%t)\n maxlen = max(map(len,labels))\n trainMeans = np.nanmean(trainRMSE, axis=0)\n validMeans = np.nanmean(validRMSE, axis=0)\n testMeans = np.nanmean(testRMSE, axis=0)\n print(\"{:>{maxlen}}{:>8}{:>8}{:>8}\".format(\"\",\"Train\",\"Valid\",\"Test\",maxlen=maxlen))\n for i in range(len(labels)):\n print( (\"{0:>{maxlen}}{1:8.2f}{2:8.2f}{3:8.2f}\".format (labels[i], trainMeans[i], validMeans[i], testMeans[i], maxlen=maxlen)) )\n \n scores_detail[\"train\"][labels[i]] = np.array(trainRMSE)[:,i]\n scores_detail[\"valid\"][labels[i]] = np.array(validRMSE)[:,i]\n scores_detail[\"test\"][labels[i]] = np.array(testRMSE)[:,i]\n \n print(\"\\n\")\n \n labels = [\"MAE\", \"MAPE\", \"MAEat%s\"%THETA, \"MAPEat%s\"%THETA]\n for t in moreTHETA:\n labels.append(\"RMSEat%s\"%t)\n labels.append(\"RMSPEat%s\"%t)\n maxlen = max(map(len,labels))\n trainMeans = np.nanmean(trainMAE, axis=0)\n validMeans = np.nanmean(validMAE, axis=0)\n testMeans = np.nanmean( testMAE, axis=0)\n 
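# NOTE: the moreTHETA labels appended above mistakenly reuse the RMSE names;\n    # rebuild the list so the MAE table and the scores_detail keys are labelled\n    # correctly (otherwise the MAE values overwrite the RMSEat/RMSPEat entries).\n    labels = [\"MAE\", \"MAPE\", \"MAEat%s\"%THETA, \"MAPEat%s\"%THETA]\n    for t in moreTHETA:\n        labels.append(\"MAEat%s\"%t)\n        labels.append(\"MAPEat%s\"%t)\n    maxlen = max(map(len,labels))\n    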
print(\"{:>{maxlen}}{:>8}{:>8}{:>8}\".format(\"\",\"Train\",\"Valid\",\"Test\",maxlen=maxlen))\n for i in range(len(labels)):\n print( (\"{0:>{maxlen}}{1:8.2f}{2:8.2f}{3:8.2f}\".format (labels[i], trainMeans[i], validMeans[i], testMeans[i], maxlen=maxlen)) )\n \n scores_detail[\"train\"][labels[i]] = np.array(trainMAE)[:,i]\n scores_detail[\"valid\"][labels[i]] = np.array(validMAE)[:,i]\n scores_detail[\"test\"][labels[i]] = np.array(testMAE)[:,i]\n print(\"\\n\")\n \n \n if FOLDS_TVT == False:\n # PLOT\n internalConfig[\"trainYinv\"] = trainYinv\n internalConfig[\"validYinv\"] = validYinv\n internalConfig[\"testYinv\"] = testYinv\n else:\n trainPred = \"DUMMY\"\n validPred = \"DUMMY\"\n testPred = \"DUMMY\"\n \n internalConfig[\"list_df\"] = list_df\n \n \n return scores_detail\n\n\n# In[ ]:\n\n\na = np.array([ [1,2,3],\n [4,5,6],\n [7,8,9]])\na[:,1]\n\n\n# ## myLSTMModel\n\n# In[ ]:\n\n\ndef myLSTMModel(internalConfig, Config):\n SEED = Config['SEED']\n np.random.seed(SEED)\n set_random_seed(SEED*SEED)\n \n ic = internalConfig\n \n TIMESTEP = Config['TIMESTEP']\n TIMEDIST = Config[\"TIMEDIST\"]\n FOLDS_TVT = Config[\"FOLDS_TVT\"]\n PATIENCE = Config[\"PATIENCE\"]\n OWN_SAVE = Config[\"OWN_SAVE\"]\n OWN_LOAD = Config[\"OWN_LOAD\"]\n \n\n \n \n OVERWRITE_MODEL = Config[\"OVERWRITE_MODEL\"]\n TARGET = Config[\"TARGET\"]\n BATCH_SIZE = Config[\"BATCH_SIZE\"]\n THETA = Config[\"THETA\"]\n LAYERS = Config[\"LAYERS\"]\n DROP_RATE = Config[\"DROP_RATE\"]\n\n EPOCHS = Config[\"EPOCHS\"]\n LOSS = Config[\"LOSS\"]\n f_len = ic[\"f_len\"]\n y_len = ic[\"y_len\"]\n \n \n if FOLDS_TVT == False:\n #if TIMEDIST == True:\n # ic[\"trainY\"] = ic[\"trainY\"].reshape(-1, TIMESTEP, y_len )\n # ic[\"validY\"] = ic[\"validY\"].reshape(-1, TIMESTEP, y_len )\n # ic[\"testY\"] = ic[\"testY\"].reshape(-1, TIMESTEP, y_len )\n #else:\n # ic[\"trainY\"] = ic[\"trainY\"][:,-1]\n # ic[\"validY\"] = ic[\"validY\"][:,-1]\n # ic[\"testY\"] = ic[\"testY\"][:,-1]\n \n list_trainX = [ ic[\"trainX\"] ]\n list_trainY = [ ic[\"trainY\"] ]\n list_validX = [ ic[\"validX\"] ]\n list_validY = [ ic[\"validY\"] ]\n else:\n list_trainX = internalConfig['list_trainX']\n list_trainY = internalConfig['list_trainY']\n list_validX = internalConfig['list_validX']\n list_validY = internalConfig['list_validY']\n \n \n losses = []\n list_models = []\n \n LAYERS.reverse()\n DROP_RATE.reverse()\n \n for fold in range(0,len(list_trainX)):\n internalConfig['fold'] = fold + 1\n print(\"Using Fold index: %d/%d\"%(fold,len(list_trainX)-1) )\n trainX = list_trainX[fold]\n trainY = list_trainY[fold]\n validX = list_validX[fold]\n validY = list_validY[fold]\n \n \n f_len = trainX.shape[-1]\n print(\"trainX.shape,\",trainX.shape)\n print(\"trainY.shape,\",trainY.shape)\n print(\"validX.shape,\",validX.shape)\n print(\"validY.shape,\",validY.shape)\n \n \n es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=PATIENCE, restore_best_weights=True)\n \n FILE_NAME = create_filename(ic, Config)\n \n try:\n if OVERWRITE_MODEL == False:\n print(\"Loading Model...\")\n if OWN_LOAD:\n if FOLDS_TVT == True:\n last_FILE_NAME = \"%s/%s-%s.h5\"%(MODELS_FOLDER,OWN_LOAD, fold)\n classicModel = load_model( last_FILE_NAME )\n else:\n last_FILE_NAME = \"%s/%s.h5\"%(MODELS_FOLDER,OWN_LOAD)\n classicModel = load_model( last_FILE_NAME )\n else:\n classicModel = load_model(FILE_NAME)\n last_FILE_NAME = FILE_NAME\n \n list_models.append(classicModel)\n print(FILE_NAME + \" loaded =)\")\n print(\"LISTO\")\n else:\n print(\"Reentrenando \"+ FILE_NAME)\n 
load_model(\"noexisto_nijamas_existire.h5\")\n except:\n print(\"batch_input_shape:(%d,%d,%d)\"%(BATCH_SIZE,TIMESTEP,f_len))\n print(\"Loading Failed. Training again...\")\n classicModel = Sequential()\n \n \n ## SIN stateful\n if len(LAYERS) == 2:\n classicModel.add(LSTM(LAYERS[1], activation=\"sigmoid\", input_shape=(TIMESTEP, f_len), return_sequences=True))\n classicModel.add(Dropout(rate=DROP_RATE[1]))\n \n if TIMEDIST == True:\n classicModel.add(LSTM(LAYERS[0], activation=\"sigmoid\", input_shape=(TIMESTEP, f_len), return_sequences=True))\n classicModel.add(Dropout(rate=DROP_RATE[0]))\n classicModel.add( TimeDistributed( Dense( y_len, activation=\"linear\") ) )\n else:\n classicModel.add(LSTM(LAYERS[0], activation=\"sigmoid\", input_shape=(TIMESTEP, f_len), return_sequences=False))\n classicModel.add(Dropout(rate=DROP_RATE[0]))\n classicModel.add(Dense(y_len, activation=\"linear\"))\n \n if LOSS == \"\":\n classicModel.compile(loss='mean_squared_error', optimizer='adam')\n elif LOSS == \"atTHETA\":\n Yscaler = ic[\"scalers\"][\"Yscaler\"]\n scaled_theta = Yscaler.transform([[ Config[\"THETA\"] ]])\n classicModel.compile(loss=lambda y,f: lossAtTHETA(scaled_theta,y,f), optimizer='adam')\n \n classicModel.fit(trainX, trainY, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_data=(validX,validY), callbacks=[es], verbose=1)#, shuffle=False)\n \n #classicModel.fit(trainX, trainY, epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=2)\n classicModel.save(FILE_NAME)\n if FOLDS_TVT == True:\n classicModel.save(\"%s/%s-%s.h5\"%(MODELS_FOLDER, OWN_SAVE, fold))\n else:\n classicModel.save(\"%s/%s-%s.h5\"%(MODELS_FOLDER, OWN_SAVE))\n print(FILE_NAME + \" saved. = )\")\n \n list_models.append(classicModel)\n \n return list_models, FILE_NAME\n #ic[\"model\"] = classicModel\n #ic[\"file_name\"] = FILE_NAME\n\n\n# ## Pruebas\n# Para cada configuración, si ya hay un modelo entrenado, éste será cargado, si no existe un modelo entrenado para dicha configuración, entonces se entrenará y guardará.\n\n# In[ ]:\n\n\nSTATION, FILTER_YEARS, THETA = get_station(\"Parque_OHiggins\")\nLSTMconfig = {\n # import_merge_and_scale()\n \"STATION\" : STATION,\n \"SCALER\" : preprocessing.StandardScaler,\n \"IMPUTATION\" : None,\n \"AGREGADOS\" : [],#[\"O3\",\"TEMP\",\"WS\",\"RH\"], #[\"ALL\"] #Horas de los maximos que se quieren agregar.\n \"TARGET\" : \"O3\",\n \"PRECALC\" : precalcular_agregados(STATION),\n \"THETA\" : THETA,\n \"moreTHETA\" : [61],\n \"FUTURE\" : 1,\n \"PAST\" : False,\n \n # select_features()\n \"FILTER_YEARS\" : FILTER_YEARS,#[2004,2013], #En Veranos\n \"CUT\" : 0.41,\n \"FIXED_FEATURES\" : ['CO', 'PM10', 'PM25', 'NO', 'NOX', 'WD', 'RH', 'TEMP', 'WS', 'UVA', 'UVB', 'O3'], # empty list means that the CUT will be used\n \"BAN\" : [\"countEC\", \"EC\",\"O3btTHETA\"], # []\n \n #make_examples()\n \"FOLDS_TVT\" : True,\n \"TIMESTEP\" : 14,\n \"OVERLAP\" : True,\n \n #make_traintest() Ignored when FOLDS_TVT == True\n \"SHUFFLE\" : False,\n \"TRAINPCT\" : 0.85,\n \n \n #myLSTM()\n \"OVERWRITE_MODEL\" : True,\n \"MODEL_NAME\" : \"classicModel\",\n \"LAYERS\" : [19],\n \"DROP_RATE\": [0.614707658384578],\n \"BATCH_SIZE\" : 16,\n \"EPOCHS\" : 400,\n \"PATIENCE\" : 20,\n \"TIMEDIST\" : False,\n \"LOSS\" : \"\", # \"\" or \"atTHETA\"\n \"GRAPH\" : True,\n \"OWN_SAVE\" : \"MiModelo\",\n \"OWN_LOAD\" : None, #\"MiModelo\",\n #Y to calc Error. 
For Test Only.\n \"Yx\" : 0 # DEFAULT 0\n }\n#myLSTMoutput = myLSTM(LSTMconfig)\n\n\n# In[ ]:\n\n\n#LSTMconfig[\"GRAPH\"] = False\n#LSTMconfig[\"moreTHETA\"] = []#[89, 76, 61]\n#a =myLSTMPredict(myLSTMoutput, LSTMconfig)\n\n\n# ## Pruebas con varias seeds\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\nSTATION, FILTER_YEARS, THETA = get_station(\"INDEP 4F\")\nLSTMconfig = {\n # import_merge_and_scale()\n \"STATION\" : STATION,\n \"SCALER\" : preprocessing.StandardScaler,\n \"IMPUTATION\" : None,\n \"AGREGADOS\" : [],#[\"O3\",\"TEMP\",\"WS\",\"RH\"], #[\"ALL\"] #Horas de los maximos que se quieren agregar.\n \"TARGET\" : \"O3\",\n \"PRECALC\" : precalcular_agregados(STATION),\n \"THETA\" : THETA,\n \"moreTHETA\" : [61],\n \"FUTURE\" : 1,\n \"PAST\" : False,\n \n # select_features()\n \"FILTER_YEARS\" : FILTER_YEARS,#[2004,2013], #En Veranos\n \"CUT\" : 0.41,\n \"FIXED_FEATURES\" : ['CO', 'PM10', 'PM25', 'NO', 'NOX', 'WD', 'RH', 'TEMP', 'WS', 'UVA', 'UVB', 'O3'], # empty list means that the CUT will be used\n \"BAN\" : [\"countEC\", \"EC\",\"O3btTHETA\"], # []\n \n #make_examples()\n \"FOLDS_TVT\" : True,\n \"TIMESTEP\" : 2,\n \"OVERLAP\" : True,\n \n #make_traintest() Ignored when FOLDS_TVT == True\n \"SHUFFLE\" : False,\n \"TRAINPCT\" : 0.85,\n \n \n #myLSTM()\n \"OVERWRITE_MODEL\" : False,\n \"MODEL_NAME\" : \"classicModel\",\n \"LAYERS\" : [3],\n \"DROP_RATE\": [0.0687025004823643],\n \"BATCH_SIZE\" : 16,\n \"EPOCHS\" : 400,\n \"PATIENCE\" : 20,\n \"TIMEDIST\" : False,\n \"LOSS\" : \"\", # \"\" or \"atTHETA\"\n \"GRAPH\" : False,\n \"OWN_SAVE\" : \"MiModelo\",\n \"OWN_LOAD\" : None, #\"MiModelo\",\n #Y to calc Error. For Test Only.\n \"Yx\" : 0 # DEFAULT 0\n }\n'''\nseeds = [123, 57, 872, 340, 77, 583, 101, 178, 938, 555]\nall_scores = []\nall_outputs = []\nfor s in seeds:\n LSTMconfig[\"SEED\"] = s\n myLSTMoutput = myLSTM(LSTMconfig)\n all_outputs.append(myLSTMoutput)\n all_scores.append( myLSTMPredict(myLSTMoutput, LSTMconfig) )\n'''\n\n\n# In[ ]:\n\n\n'''\nall_metrics = [\"RMSE\", \"RMSEat%s\"%THETA]\nfor m in all_metrics:\n for d in ['train', 'valid', 'test']:\n fmeans = []\n for i in range( len(seeds) ):\n fmeans.append( np.nanmean(all_scores[i][d][m]) ) #mean over folds\n mean = np.mean(fmeans)\n std = np.std(fmeans)\n print(m, d, mean, std)\n'''\n\n\n# In[ ]:\n\n\n#STOP\n\n\n# In[ ]:\n\n\n\n\n\n# # LSTM 2Y-Loss\n\n# ## repeatY\n\n# In[ ]:\n\n\ndef repeatY(internalConfig, cant):\n trainY = internalConfig[\"trainY\"]\n testY = internalConfig[\"testY\"]\n TIMEDIST = False\n \n print(\"Repeating Y ..\")\n \n if TIMEDIST == True:\n pass\n else:\n trainYr = trainY[:, -1, :]\n testYr = testY[:, -1, :]\n \n trainY = trainY[:, -1, 0, None]\n testY = testY[:, -1, 0, None]\n \n #if cant > 1:\n for i in range(1,cant):\n #trainYq = np.hstack([trainYq, trainY[:,None]])\n #testYq = np.hstack([testYq, testY[:,None]])\n trainYr = np.hstack([trainYr, trainY[:]])\n testYr = np.hstack([testYr, testY[:]])\n \n print(\" trainYq.shape, \", trainYr.shape)\n print(\" testYq.shape, \", testYr.shape)\n \n return trainYr, testYr\n\n\n# ## LSTM_2Y\n\n# In[ ]:\n\n\ndef LSTM_2Y(Config): #, AGREGADOS, TARGET, PRECALC, THETA, FUTURE, PAST, FEATURES, CUT, BAN, TIMESTEP, OVERLAP, BATCH_SIZE, TRAINPCT, OVERWRITE_MODEL, MODEL_NAME, LAYERS, EPOCHS, TIMEDIST, Yx):\n scalers = {}\n ic = {\"scalers\": scalers}\n \n Config[\"CLASSIFIER_CONFIG\"][\"PRED_TYPE\"] = Config[\"PRED_TYPE\"]\n classifierOutput = ClassifierLSTM(Config[\"CLASSIFIER_CONFIG\"])\n ic[\"classifierOutput\"] = classifierOutput\n \n np.random.seed(123)\n 
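# fix the NumPy and TensorFlow seeds so results are reproducible across runs\n    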
set_random_seed(2)\n \n Config['SHIFT'] = Config['FUTURE'] * -1\n \n #SHIFT = FUTURE*-1\n #PRECALC = precalcular_agregados()\n \n \n \n \n complete_dataset, ylabels, Yscaler, h24scaler = import_merge_and_scale(Config, verbose=False)\n ic[\"complete_dataset\"] = complete_dataset\n ic[\"ylabels\"] = ylabels\n scalers['Yscaler'] = Yscaler\n scalers[\"h24scaler\"] = h24scaler\n \n \n y_len = len(ic[\"ylabels\"])\n ic[\"y_len\"] = y_len\n \n \n data, features = select_features(ic, Config)\n ic[\"data\"] = data\n ic[\"features\"] = features\n \n ic[\"f_len\"] = len(ic[\"features\"])\n \n ic[\"secuencias\"] = obtener_secuencias(ic)\n \n \n \n examples, y_examples, dateExamples = make_examples(ic, Config, verbose=False)\n ic[\"examples\"] = examples\n ic[\"y_examples\"] = y_examples\n ic[\"dateExamples\"] = dateExamples\n \n # Merge with classifier\n \n \n \n \n trainX, trainY, testX, testY, dateTrain, dateTest = make_traintest(ic, Config, verbose=False)\n ic[\"trainX\"] = trainX\n ic[\"trainY\"] = trainY\n ic[\"testX\"] = testX\n ic[\"testY\"] = testY\n ic[\"dateTrain\"] = dateTrain\n ic[\"dateTest\"] = dateTest\n \n print(\"YY\",trainY.shape)\n trainY, testY = repeatY(ic, 2)\n ic[\"trainY\"] = trainY\n ic[\"testY\"] = testY\n print(\"YY\",trainY.shape)\n print(trainY)\n \n \n ### Model\n classicModel , file_name = LSTM_2YModel(ic, Config)\n ic[\"model\"] = classicModel\n ic[\"file_name\"] = file_name\n \n ### Prediction\n LSTM_2YPredict(ic, Config)\n #trainPred, testPred, df = LSTM_2YPredict(ic, Config)\n #ic[\"trainPred\"] = trainPred\n #ic[\"testPred\"] = testPred\n #ic[\"df\"] = df\n \n return ic\n\n\n# ## LSTM_2YPredict\n\n# In[ ]:\n\n\ndef LSTM_2YPredict(internalConfig, Config):\n TIMEDIST = Config[\"TIMEDIST\"]\n BATCH_SIZE = Config[\"BATCH_SIZE\"]\n THETA = Config[\"THETA\"]\n TARGET = Config[\"TARGET\"]\n LAYERS = Config[\"LAYERS\"]\n PRED_TYPE = Config[\"PRED_TYPE\"]\n Yx = Config[\"Yx\"]\n Yscaler = internalConfig[\"scalers\"][\"Yscaler\"]\n trainX = internalConfig[\"trainX\"]\n trainY = internalConfig[\"trainY\"]\n testX = internalConfig[\"testX\"]\n testY = internalConfig[\"testY\"]\n dateTrain = internalConfig[\"dateTrain\"]\n dateTest = internalConfig[\"dateTest\"]\n classicModel = internalConfig[\"model\"]\n classifierOutput = internalConfig[\"classifierOutput\"]\n \n # Predictions\n print(\"Calculando Predicciones\")\n trainPred = classicModel.predict(trainX, batch_size=BATCH_SIZE)\n classicModel.reset_states()\n testPred = classicModel.predict(testX, batch_size=BATCH_SIZE)\n classicModel.reset_states()\n \n #TimeDistributed\n ##T = Yx\n ##if TIMEDIST == True:\n ## print(trainPred.shape)\n ## print(trainY.shape)\n ## trainPred = trainPred[:,-1, T].reshape(-1, 1)\n ## trainY = trainY[:,-1,0].reshape(-1, 1)\n ## testPred = testPred[:,-1, T].reshape(-1, 1)\n ## testY = testY[:,-1,0].reshape(-1, 1)\n ## print(trainPred.shape)\n ## print(trainPred.shape)\n ##else:\n ## print(trainPred.shape)\n ## print(trainY.shape)\n ## trainPred = trainPred[:, T, None]\n ## trainY = trainY[:, T, None]\n ## testPred = testPred[:, T, None]\n ## testY = testY[:, T, None]\n ## print(trainPred.shape)\n ## print(trainY.shape)\n \n ###print(\"###\", trainPred)\n ####print(\"###\", trainY)\n ###\n classTrainPred = classifierOutput[\"trainPred\"]\n classTrainDate = classifierOutput[\"dateTrain\"]\n ###df1 = join_index(classTrainDate, classTrainPred, \"classTrain\")\n ###\n classTestPred = classifierOutput[\"testPred\"]\n classTestDate = classifierOutput[\"dateTest\"]\n ###df2 = join_index(classTestDate, 
classTestPred, \"classTest\")\n ###\n ###dfDate = pd.concat([df1,df2], axis=0)\n ###\n ###print( dateTrain.shape, trainPred.shape ) \n ###dfTrain = join_index(dateTrain, trainPred, \"trainPred\" )\n ###print(len(dfDate))\n ###print(dfTrain)\n ###dfTrain[\"classTest\"] = dfDate\n ###print(dfTrain)\n \n #print(\"QQ\",len(trainPred),len(classTrainPred))\n #print(\"QQ\",len(testPred),len(classTestPred))\n \n if PRED_TYPE == \"hard\":\n ntrainPred = []\n for i in range(len(trainPred)):\n #ntrainPred.append( [ trainPred[i, int(classTrainPred[i][0])] ] )\n #v = int( classifierOutput[\"trainY\"][i]) # Caso Ideal\n v = int( classifierOutput[\"trainPred\"][i]) # Prediccion 'hard'\n ntrainPred.append( [ trainPred[i, v] ] )\n if classTrainDate[i] != dateTrain[i]:\n print(\"!!!\",True)\n ntrainPred = np.array(ntrainPred)\n \n ntestPred = []\n for i in range(len(testPred)):\n #ntestPred.append( [ testPred[i, int(classTestPred[i][0])] ] )\n #v=int(classifierOutput[\"testY\"][i]) # Caso Ideal\n v=int(classifierOutput[\"testPred\"][i]) # Prediccion 'hard'\n ntestPred.append( [ testPred[i, v] ] )\n if classTestDate[i] != dateTest[i]:\n print(\"!!!,test\",True)\n ntestPred = np.array(ntestPred)\n elif PRED_TYPE == \"soft\":\n ntrainPred = []\n for i in range(len(trainPred)):\n upperProb = float( classifierOutput[\"trainPred\"][i]) # Prediccion 'soft'\n lowerProb = 1-upperProb\n ntrainPred.append( [ trainPred[i, 0]*lowerProb + trainPred[i, 1]*upperProb ] )\n if classTrainDate[i] != dateTrain[i]:\n print(\"!!!\",True)\n ntrainPred = np.array(ntrainPred)\n \n ntestPred = []\n for i in range(len(testPred)):\n upperProb = float( classifierOutput[\"testPred\"][i]) # Prediccion 'soft'\n lowerProb = 1-upperProb\n ntestPred.append( [ testPred[i, 0]*lowerProb + testPred[i, 1]*upperProb ] )\n if classTestDate[i] != dateTest[i]:\n print(\"!!!\",True)\n ntestPred = np.array(ntestPred)\n \n #print(ntrainPred)\n \n \n # Invert Predictions\n ntrainPred = Yscaler.inverse_transform(ntrainPred)\n trainYinv = Yscaler.inverse_transform(trainY[:, 0])\n \n ntestPred = Yscaler.inverse_transform(ntestPred)\n testYinv = Yscaler.inverse_transform(testY[:, 0])\n \n #print(\"###\", ntrainPred[:5])\n #print(\"###\", trainYinv[:5])\n \n #for x,y,z,zz in zip(trainYinv,classifierOutput[\"trainY\"], dateTrain, classifierOutput[\"dateTrain\"]):\n # print(z,zz,x,y, x>=THETA, ((x >= THETA) != y)*999999999999)\n \n \n \n # calculate root mean squared error\n print(\"Calculando Error\")\n trainScore = math.sqrt(mean_squared_error(trainYinv, ntrainPred))\n print(' Train Score: %.2f RMSE' % (trainScore))\n testScore = math.sqrt(mean_squared_error(testYinv, ntestPred))\n print(' Test Score: %.2f RMSE' % (testScore))\n \n print(\"Calculando RMSEat%d\"%THETA)\n trainScore = RMSEat(THETA,trainYinv, ntrainPred)\n print(' Train Score: %.2f RMSEat%.2f' % (trainScore,THETA))\n testScore = RMSEat(THETA,testYinv, ntestPred)\n print(' Test Score: %.2f RMSEat%.2f' % (testScore, THETA))\n \n '''\n print(\"mean trainY <, >, mean:\", np.mean(trainYinv[trainYinv=THETA], axis=0),np.mean(trainYinv, axis=0) )\n v = internalConfig[\"classifierOutput\"][\"trainY\"].astype(dtype=bool).flatten()\n v1 = (1-internalConfig[\"classifierOutput\"][\"trainY\"]).astype(dtype=bool).flatten()\n t= Yscaler.inverse_transform(trainPred)\n print(trainPred.shape, t.shape, v.shape, v1.shape)\n print(\"mean trainP <, >, mean:\",np.mean(t[v1,0], axis=0), np.mean(t[v,1], axis=0),np.mean(t[:,2], axis=0))\n print(\"mean trainPred <, >, mean:\",np.mean(Yscaler.inverse_transform(trainPred), 
axis=0))\n print(\"mean nTrainPred:\",np.mean(ntrainPred, axis=0))\n print(\"====\")\n print(\"mean testY <, >, mean:\", np.mean(testYinv[testYinv=THETA], axis=0),np.mean(testYinv, axis=0) )\n v = internalConfig[\"classifierOutput\"][\"testY\"].astype(dtype=bool).flatten()\n v1 = (1-internalConfig[\"classifierOutput\"][\"testY\"]).astype(dtype=bool).flatten()\n t= Yscaler.inverse_transform(testPred)\n print(testPred.shape, t.shape, v.shape, v1.shape)\n print(\"mean testP <, >, mean:\",np.mean(t[v1,0], axis=0), np.mean(t[v,1], axis=0),np.mean(t[:,2], axis=0))\n print(\"mean testPred <, >, mean:\",np.mean(Yscaler.inverse_transform(testPred), axis=0))\n print(\"mean nTestPred:\",np.mean(ntestPred, axis=0))\n '''\n \n \n # PLOT\n print(\"Graficando...\")\n df1 = join_index(dateTrain,trainYinv, \"Y\")\n df2 = join_index(dateTrain,ntrainPred, \"f train\")\n \n df3 = join_index(dateTest,testYinv, \"Y\")\n df4 = join_index(dateTest,ntestPred, \"f test\")\n \n ydf = pd.concat([df1, df3], axis=0)\n df = pd.concat( [ydf, df4,df2] ,axis=1)\n \n #traindf = pd.concat([df1,df2], axis=1)\n #testdf = pd.concat([df3,df4], axis=1)\n \n #df = pd.concat([traindf,testdf])\n \n #fecha_test = testdf.index[0]\n #df.asfreq(\"D\").iplot(title = \"%s - %s\"%(TARGET, LAYERS), vline=[fecha_test], hline=[THETA])\n df.asfreq(\"D\").iplot(title = \"%s - %s\"%(TARGET, LAYERS), hline=[THETA])\n \n df = df.asfreq(\"D\")\n \n \n \n\n\n# In[ ]:\n\n\na = np.array([[1,2],[3,4],[5,6]])\nf = [True,False,True]\na.flatten()\n\n\n# ## LSTM_2YModel\n\n# In[ ]:\n\n\ndef LSTM_2YModel(internalConfig, Config):\n ic = internalConfig\n \n TIMESTEP = Config['TIMESTEP']\n TIMEDIST = Config[\"TIMEDIST\"]\n \n #if TIMEDIST == True:\n # ic[\"trainY\"] = ic[\"trainY\"].reshape(-1, TIMESTEP, y_len )\n # ic[\"testY\"] = ic[\"testY\"].reshape(-1, TIMESTEP, y_len )\n #else:\n # ic[\"trainY\"] = ic[\"trainY\"][:,-1]\n # ic[\"testY\"] = ic[\"testY\"][:,-1]\n \n \n OVERWRITE_MODEL = Config[\"OVERWRITE_MODEL\"]\n TARGET = Config[\"TARGET\"]\n BATCH_SIZE = Config[\"BATCH_SIZE\"]\n THETA = Config[\"THETA\"]\n LAYERS = Config[\"LAYERS\"]\n DROP_RATE = Config[\"DROP_RATE\"]\n EPOCHS = Config[\"EPOCHS\"]\n LOSS = Config[\"LOSS\"]\n f_len = ic[\"f_len\"]\n trainX = ic[\"trainX\"]\n trainY = ic[\"trainY\"]\n testX = ic[\"testX\"]\n testY = ic[\"testY\"]\n y_len = ic[\"y_len\"]\n \n Yscaler = ic[\"scalers\"][\"Yscaler\"]\n scaled_theta = Yscaler.transform([[ THETA ]])\n \n FILE_NAME = create_filename(ic, Config)\n \n print(\"###\", trainY.shape)\n print(\"###\", testY.shape)\n \n try:\n if OVERWRITE_MODEL == False:\n print(\"Loading Model...\")\n classicModel = load_model(FILE_NAME, custom_objects = {'': lambda y,f: loss2YatTHETA(scaled_theta,y,f)})\n print(FILE_NAME + \" loaded =)\")\n print(\"LISTO\")\n else:\n print(\"Reentrenando \"+ FILE_NAME)\n load_model(\"noexisto_nijamas_existire.h5\")\n except:\n print(\"batch_input_shape:(%d,%d,%d)\"%(BATCH_SIZE,TIMESTEP,f_len))\n classicModel = Sequential()\n \n for Neurons in LAYERS[:-1]:\n classicModel.add(LSTM(Neurons, activation=\"sigmoid\", input_shape=(TIMESTEP, f_len), return_sequences=True))\n classicModel.add(Dropout(rate=DROP_RATE))\n \n if TIMEDIST == True:\n classicModel.add(LSTM(LAYERS[-1], activation=\"sigmoid\", input_shape=(TIMESTEP, f_len), return_sequences=True))\n classicModel.add(Dropout(rate=DROP_RATE))\n classicModel.add( TimeDistributed( Dense( y_len, activation=\"linear\") ) )\n print(\"JEJEJE\")\n else:\n classicModel.add(LSTM(LAYERS[-1], activation=\"sigmoid\", input_shape=(TIMESTEP, f_len), 
return_sequences=False))\n classicModel.add(Dropout(rate=DROP_RATE))\n classicModel.add(Dense( trainY.shape[1], activation=\"linear\"))\n \n #qModel = Sequential()\n #for Neurons in LAYERS[:-1]:\n # qModel.add(LSTM(Neurons, input_shape=(TIMESTEP, f_len), return_sequences=True))\n # qModel.add(Dropout(rate=DROP_RATE))\n #\n #if TIMEDIST == True:\n # pass\n #else:\n # qModel.add(LSTM(LAYERS[-1], input_shape=(TIMESTEP, f_len), return_sequences=False))\n # qModel.add(Dropout(rate=DROP_RATE))\n # qModel.add(Dense( y_len + len(QUANTILES) ))\n # qModel.compile(loss=lambda y,f: quantil_loss(QUANTILES, y_len,y,f), optimizer='adam')\n\n classicModel.compile(loss=lambda y,f: loss2YatTHETA(scaled_theta,y,f), optimizer='adam')\n #classicModel.compile(loss='mean_squared_error', optimizer='adam')\n \n classicModel.fit(trainX, trainY, epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=1, shuffle=False)\n\n #classicModel.fit(trainX, trainY, epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=2)\n classicModel.save(FILE_NAME)\n print(FILE_NAME + \" saved. = )\")\n \n return classicModel, FILE_NAME\n #ic[\"model\"] = classicModel\n #ic[\"file_name\"] = FILE_NAME\n\n\n# ## Pruebas - cuantílica\n\n# In[ ]:\n\n\nSTATION = \"Las_Condes\"\nqConfig = {\n \"SCALER\" : preprocessing.StandardScaler,\n \"AGREGADOS\":[],\n \"TARGET\": \"O3\",\n \"PRECALC\" : precalcular_agregados(STATION),\n \"THETA\":1,\n \"FUTURE\":1, #must be 1\n \"PAST\":False, #must be False\n \n \"FIXED_FEATURES\":[],\n \"CUT\":0.26,\n \"BAN\":[\"countEC\",\"EC\",\"O3btTHETA\"],\n \n \"TIMESTEP\" : 14,\n \"OVERLAP\":True,\n \n \"SHUFFLE\" : False,\n \"BATCH_SIZE\": 16,\n \"TRAINPCT\":0.85,\n \n \"OVERWRITE_MODEL\":False,\n \"MODEL_NAME\" : \"qModel\",\n \"QUANTILES\":[0.5],\n \"LAYERS\" : [3],\n \"DROP_RATE\": 0.4,\n \"EPOCHS\" : 250,\n \"TIMEDIST\" : False, #Sin implementar\n \"WW\" : None,\n \n }\n#Qoutput = quantileLSTM(qConfig)\n\n\n# In[ ]:\n\n\n#WW = Qoutput[\"model\"].get_weights()\n\n\n# In[ ]:\n\n\n'''\ntrainTemp, trainDate = Qoutput[\"trainPred\"][:,0], Qoutput[\"dateTrain\"]\ntestTemp, testDate = Qoutput[\"testPred\"][:,0], Qoutput[\"dateTest\"]\npredTemp = np.hstack([trainTemp,testTemp])\ndateTemp = np.hstack([trainDate,testDate])\nnew_column = join_index(dateTemp,predTemp,\"O3pred\")\n\nadata = Qoutput[\"data\"].copy()\ncolumns = adata.columns\ndata = Qoutput[\"complete_dataset\"].copy()\n#new_column\ndata[\"O3\"]=Qoutput[\"scalers\"][\"Yscaler\"].inverse_transform(data[\"O3\"])\ndata = pd.concat([data,new_column],axis=1)\n#data[\"O3pred\"] = new_column\n#data = data[[\"O3\",\"O3pred\"]]\n#data = data[list(columns)+[\"O3pred\"]]\n#data[\"O3pred\"] = data[\"O3pred\"].shift(-1) #EL SHIFT se debe hacer para el cuando se considere como TARGET Y\n\n#print(data[\"O3pred\"].describe())\n#print(data)\n#print(data[\"O3pred\"])\n#print(data2[\"O3pred\"].describe())\n\ntemp = data.copy()\n#data2 = data.dropna()\n#print(data.describe())\n\nprint(data[\"O3\"].describe())\ndata2 = data.asfreq(\"D\")\nprint( np.mean(data[columns].dropna().index == data2[columns].dropna().index))\nprint(data2[\"O3\"].describe())\nprint(len(data))\na = data.apply( lambda x: float(\"188888\") if np.isnan(x[\"O3pred\"]) else (x[\"O3\"] > x[\"O3pred\"])*1, axis=1 )\nprint(a.describe())\n#data[\"O3btPred\"] = (data[\"O3\"] > data[\"O3pred\"])*1\n\ndata[\"O3btPred\"] = a\ndata = data[list(columns)+[\"O3btPred\"]]\ndata = data.dropna()\ndata[\"y\"] = data[\"O3btPred\"].shift(1)\n#data = data.dropna().drop(\"O3btPred\",axis=1)\n#data = data.drop(\"O3btPred\", 
axis=1).dropna()\nprint(data[[\"O3\",\"y\"]])\nprint(adata[[\"O3\",\"y\"]])\nprint( sum(data.index == adata.index))\n\n# FALTA HACER QUE LOS COINCIDAN BIEN LOS DATOS, SIN LUGAR A ERROR\n# CONSIDERAR QUE AL HACER EL SHIFT LOS DIAS DEBE ESTAR CORRELATIVOS\n#\n#\n\n\n#\n#temp = temp.dropna()\n##data[\"O3btPred\"] = temp[\"O3btPred\"]\n##data[[\"O3\",\"O3pred\",\"O3btPred\"]]\n##adata[\"O3btPred\"] = data[\"O3btPred\"]\n#adata[\"O3btPred\"].fillna(value=adata[\"y\"])\n#adata[\"O3btPred\"].describe()\n#print(random.choice([0.,1.]) if np.isnan(123) else 123 )\n#print(data[\"O3btPred\"].describe())\n##p=adata[\"O3btPred\"].describe()[1]\n##adata[\"O3btPred\"] = adata[\"O3btPred\"].apply( lambda x: np.random.choice([0,1],p=[1-p,p]) if np.isnan(x) else x )\n#print(adata[\"O3btPred\"].describe())\n#print(adata[\"KK\"].describe())\n\n'''\n\n\n# In[ ]:\n\n\n'''\ntrainPred, trainY, dateTrain = Qoutput[\"trainPred\"][:,0].copy(), Qoutput[\"trainYtrue\"][:,0].copy() , Qoutput[\"dateTrain\"].copy()\ntestPred, testY, dateTest = Qoutput[\"testPred\"][:,0].copy(), Qoutput[\"testYtrue\"][:,0].copy() , Qoutput[\"dateTest\"].copy()\n\ntrainX, testX = Qoutput[\"trainX\"], Qoutput[\"testX\"]\n\ntrainX.shape, trainPred.shape, trainY.shape\nfor i in range(len(trainY)):\n if trainY[i] > trainPred[i]:\n trainY[i] = 1.\n else:\n trainY[i] = 0.\ntrainY = trainY[:, None]\n\nfor i in range(len(testY)):\n if testY[i] > testPred[i]:\n testY[i] = 1.\n else:\n testY[i] = 0.\ntestY = testY[:, None]\n\nQoutput[\"trainY\"] = trainY\nQoutput[\"testY\"] = testY\n\n'''\n\n\n# ## Pruebas - clasificadora\n\n# In[ ]:\n\n\nSTATION = \"Las_Condes\"\nClassifierLSTMconfig={\n # import_merge_and_scale()\n \"SCALER\" : preprocessing.StandardScaler,\n \"AGREGADOS\" : [],#[\"O3\",\"TEMP\",\"WS\",\"RH\"], #[\"ALL\"] #Horas de los maximos que se quieren agregar.\n \"PRECALC\" : precalcular_agregados(STATION),\n \"TARGET\" : \"O3btTHETA\",\n \"THETA\" : 1,\n \"FUTURE\" : 1,\n \"PAST\" : False,\n \n # select_features()\n \"FIXED_FEATURES\" : [], # empty list means that the CUT will be used\n \"CUT\" : 0.26,\n \"BAN\" : [\"countEC\",\"EC\",\"O3btTHETA\"],#[\"countEC\", \"EC\"],# []\n \n #make_examples()\n \"USE_EXAMPLES\" : \"DUMMY\",#Qoutput,\n \"TIMESTEP\" : 14,\n \"OVERLAP\" : True,\n \n #make_traintest()\n \"SHUFFLE\" : False,\n \"BATCH_SIZE\" : 16,\n \"TRAINPCT\" : 0.85,\n \n #myLSTM()\n \"OVERWRITE_MODEL\" : True,\n \"MODEL_NAME\" : \"ClassifierO3btTHETAModel\",\n \"LAYERS\" : [1],\n \"EPOCHS\" : 100,\n \"DROP_RATE\" : 0.4,\n \"TIMEDIST\" : False, #SIN IMPLEMENTAR\n \n # Y \n \"PRED_TYPE\" : \"hard\",\n }\n#ClassifierOutput = ClassifierLSTM( ClassifierLSTMconfig )\n\n\n# In[ ]:\n\n\n#co = ClassifierOutput\n#ClassPredict(co, ClassifierLSTMconfig)\n\n\n# In[ ]:\n\n\n#print((pd.DataFrame(ClassifierOutput[\"trainY\"])).describe())\n#print((pd.DataFrame(ClassifierOutput[\"testY\"])).describe())\n\n\n# ## Pruebas - LSTM\n\n# In[ ]:\n\n\nSTATION = \"Las_Condes\"\nLSTM2Yconfig = {\n \"CLASSIFIER_CONFIG\" : ClassifierLSTMconfig,\n # import_merge_and_scale()\n \"SCALER\" : preprocessing.StandardScaler,\n \"AGREGADOS\" : [],#[\"O3\",\"TEMP\",\"WS\",\"RH\"], #[\"ALL\"] #Horas de los maximos que se quieren agregar.\n \"TARGET\" : \"O3\",\n \"PRECALC\" : precalcular_agregados(STATION),\n \"THETA\" : 92,\n \"FUTURE\" : 1,\n \"PAST\" : False,\n \n # select_features()\n \"FIXED_FEATURES\" : [], # empty list means that the CUT will be used\n \"CUT\" : 0.26,\n \"BAN\" : [],#[\"countEC\", \"EC\"],# []\n \n #make_examples()\n \"TIMESTEP\" : 2,\n \"OVERLAP\" : 
True,\n    \n    #make_traintest()\n    \"SHUFFLE\" : True,\n    \"BATCH_SIZE\" : 16,\n    \"TRAINPCT\":0.85,\n    \n    \n    #myLSTM()\n    \"OVERWRITE_MODEL\" : False,\n    \"MODEL_NAME\" : \"LSTM2Ymodel\",\n    \"LAYERS\" : [3],\n    \"DROP_RATE\": 0.4,\n    \"EPOCHS\" : 100,\n    \"TIMEDIST\" : False, #Not implemented\n    \"LOSS\" : \"\", # \"\" or \"atTHETA\"\n    \n    \n    #Y to calc Error. For Test Only.\n    \"PRED_TYPE\" : \"soft\", # overrides the classifier's PRED_TYPE\n    \"Yx\" : 0 # DEFAULT 0\n    }\n#LSTM_2Youtput = LSTM_2Y(LSTM2Yconfig)\n\n\n# In[ ]:\n\n\n#l2y=LSTM_2Youtput\n#LSTM_2YPredict(l2y,LSTM2Yconfig)\n\n\n# In[ ]:\n\n\n#a=LSTM_2Youtput\n##print(a[\"dateExamples\"])\n##print(a[\"classifierOutput\"][\"dateExamples\"])\n#b = a[\"complete_dataset\"].copy()\n#print(a[\"complete_dataset\"][[\"O3\",\"O3btTHETA\"]])\n#print(a[\"complete_dataset\"][\"O3\"].dropna() > 0)\n#b[\"jeje\"] = b[\"O3\"].dropna() > 0\n#b[[\"O3\",\"jeje\"]].describe()\n#a[\"data\"][\"O3\"][\"2003-12-14\":\"2003-12-14\"][0]\n##print(a[\"classifierOutput\"][\"complete_dataset\"].describe())\n\n\n# In[ ]:\n\n\n#a = pd.DataFrame([1,2,3,4,5,6,7,8,9])\n#b = pd.DataFrame([10,11,12,13])\n##a[1] = b\n#pd.concat([a,b], axis=1)\n\n\n# # Quantile Regression LSTM\n\n# ## make_qY\n# Replicates the label arrays as many times as needed to train both the mean value and the quantiles.\n\n# In[ ]:\n\n\n# Modify trainY and testY for Quantile Regression\ndef make_qY(internalConfig, Config):\n    trainY = internalConfig[\"trainY\"]\n    validY = internalConfig[\"validY\"]\n    testY = internalConfig[\"testY\"]\n    QUANTILES = Config[\"QUANTILES\"]\n    TIMEDIST = Config[\"TIMEDIST\"]\n    \n    print(\"Making Y for quantiles\")\n    ## If there are problems, use train[:,None]\n    #print(trainY.shape)\n    #print(trainY[:,None].shape)\n    #trainYq = trainY[:,None]\n    #testYq = testY[:,None]\n    \n    if TIMEDIST == True:\n        pass\n    else:\n        trainYq = trainY[:, -1, :]\n        validYq = validY[:, -1, :]\n        testYq = testY[:, -1, :]\n        \n        trainY = trainY[:, -1, 0, None]\n        validY = validY[:, -1, 0, None]\n        testY = testY[:, -1, 0, None]\n    \n    \n    for i in range(len(QUANTILES)):\n        #trainYq = np.hstack([trainYq, trainY[:,None]])\n        #testYq = np.hstack([testYq, testY[:,None]])\n        trainYq = np.hstack([trainYq, trainY[:]])\n        validYq = np.hstack([validYq, validY[:]])\n        testYq = np.hstack([testYq, testY[:]])\n    \n    print(\" trainYq.shape, \", trainYq.shape)\n    print(\" testYq.shape, \", testYq.shape)\n    return trainYq, validYq, testYq\n\n\n# ## quantil_loss\n# Loss function used to train the quantiles. 
\n# The total loss combines the output for the mean value and the output for each quantile.\n\n# In[ ]:\n\n\n# Loss Function\ndef quantil_loss_old(quantiles, ylen, ytrue, ypred):\n    loss = 0\n    for i in range(ylen):\n        loss += K.mean(K.square(ytrue[:, i]-ypred[:, i]), axis=-1)\n    \n    #loss = K.mean(K.square(ytrue[:, 0]-ypred[:, 0]), axis=-1)\n    \n    for k in range(len(quantiles)):\n        q = quantiles[k]\n        e = (ytrue[:, ylen+k]-ypred[:, ylen+k])\n        loss += K.mean(q*e + K.clip(-e, K.epsilon(), np.inf), axis=-1)\n    return loss\n\n\n# In[ ]:\n\n\n# Loss Function\ndef quantil_loss(quantiles, ylen, qlen, ytrue, ypred):\n    loss = 0\n    \n    for i in range(ylen):\n        loss += K.mean(K.square(ytrue[:,0] - ypred[:, i]), axis=-1)\n    \n    #loss = K.mean(K.square(ytrue[:, 0]-ypred[:, 0]), axis=-1)\n    \n    for k in range(qlen):\n        q = quantiles[k]\n        e = ( ytrue[:,0] - ypred[:, ylen+k])\n        loss += K.mean(q*e + K.clip(-e, K.epsilon(), np.inf), axis=-1)\n    return loss\n\n\n# In[ ]:\n\n\n# Loss Function\ndef meanquantil_loss(quantiles, ylen, qlen, ytrue, ypred):\n    loss = 0\n    \n    for i in range(ylen):\n        loss += K.mean(K.square(ytrue[:,0] - ypred[:, i]), axis=-1)\n    \n    #loss = K.mean(K.square(ytrue[:, 0]-ypred[:, 0]), axis=-1)\n    for k in range(qlen):\n        q = quantiles[k]\n        e = ( ytrue[:,0] - ypred[:, ylen+k])\n        loss += K.mean(q*e + K.clip(-e, K.epsilon(), np.inf), axis=-1)\n    return loss/(ylen+qlen)\n\n\n# In[ ]:\n\n\n# Loss Function\ndef meanquantil_loss2(quantiles, ylen, qlen, ytrue, ypred):\n    loss = 0\n    \n    for i in range(ylen):\n        loss += K.mean(K.square(ytrue[:,0] - ypred[:, i]), axis=-1)\n    \n    #loss = K.mean(K.square(ytrue[:, 0]-ypred[:, 0]), axis=-1)\n    qloss = 0\n    for k in range(qlen):\n        q = quantiles[k]\n        e = ( ytrue[:,0] - ypred[:, ylen+k])\n        qloss += K.mean(q*e + K.clip(-e, K.epsilon(), np.inf), axis=-1)\n    qloss = qloss/qlen\n    return loss + qloss\n\n\n# ## Qmodel\n# Trains the model that predicts the quantiles at time T+1. 
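\n\n# Before Qmodel, a quick numeric sanity check of the pinball term shared by the\n# loss functions above. This NumPy version is a sketch for intuition only (it\n# assumes numpy as np from this notebook's imports); the Keras losses operate on\n# tensors, where q*e + clip(-e, epsilon, inf) is equivalent to max(q*e, (q-1)*e).\n\n# In[ ]:\n\n\ndef pinball_numpy(q, ytrue, ypred):\n    # penalises under-prediction (e > 0) by q*e and over-prediction (e < 0) by (q-1)*e\n    e = ytrue - ypred\n    return np.mean(np.maximum(q * e, (q - 1.0) * e))\n\nyt = np.array([1.0, 2.0, 3.0, 4.0])\nprint(pinball_numpy(0.9, yt, yt - 0.5))  # under-prediction, weighted by q   -> 0.45\nprint(pinball_numpy(0.9, yt, yt + 0.5))  # over-prediction, weighted by 1-q  -> 0.05\n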
\n\n# In[ ]:\n\n\n# Model\n# [4], [100], [4,4], [100,100], [50,50], [50, 50, 10], [30, 30, 30, 30], [4, 4, 4, 4]\n\ndef Qmodel(internalConfig, Config):#, trainX, trainY, ylen, FUTURE, PAST, TARGET, BATCH_SIZE, TIMESTEP, FEATURES, TRAINPCT, OVERWRITE_MODEL, MODEL_NAME, QUANTILES, LAYERS, EPOCHS, TIMEDIST):\n SEED = Config['SEED']\n np.random.seed(SEED)\n set_random_seed(SEED*SEED)\n \n OVERWRITE_MODEL = Config[\"OVERWRITE_MODEL\"]\n LAYERS = Config[\"LAYERS\"]\n TIMESTEP = Config[\"TIMESTEP\"]\n BATCH_SIZE = Config[\"BATCH_SIZE\"]\n FOLDS_TVT = Config[\"FOLDS_TVT\"]\n EPOCHS = Config[\"EPOCHS\"]\n PATIENCE = Config[\"PATIENCE\"]\n DROP_RATE = Config[\"DROP_RATE\"]\n QUANTILES = Config[\"QUANTILES\"]\n TIMEDIST = Config[\"TIMEDIST\"]\n LOSS = Config[\"QLOSS\"]\n y_len = internalConfig[\"y_len\"]\n f_len = internalConfig[\"f_len\"]\n \n #trainX = internalConfig[\"trainX\"]\n #trainY = internalConfig[\"trainY\"]\n #validX = internalConfig[\"validX\"]\n #validY = internalConfig[\"validY\"]\n \n if FOLDS_TVT == False:\n list_trainX = [ internalConfig[\"trainX\"] ]\n list_trainY = [ internalConfig[\"trainY\"] ]\n list_validX = [ internalConfig[\"validX\"] ]\n list_validY = [ internalConfig[\"validY\"] ]\n else:\n list_trainX = internalConfig['list_trainX']\n list_trainY = internalConfig['list_trainY']\n list_validX = internalConfig['list_validX']\n list_validY = internalConfig['list_validY']\n \n \n LAYERS.reverse()\n DROP_RATE.reverse()\n losses = []\n list_models = []\n for fold in range(0,len(list_trainX)):\n internalConfig['fold'] = fold + 1\n print(\"Using Fold:\", fold)\n trainX = list_trainX[fold]\n trainY = list_trainY[fold]\n validX = list_validX[fold]\n validY = list_validY[fold]\n \n \n f_len = trainX.shape[-1]\n \n FILE_NAME = create_filename(internalConfig, Config)\n \n \n try:\n print(\"Model File Name: \",FILE_NAME)\n if OVERWRITE_MODEL == False:\n print(\"Loading Model...\")\n qModel = load_model(FILE_NAME, custom_objects = {'': lambda y,f: LOSS(QUANTILES,y_len,len(QUANTILES),y,f)})\n list_models.append(qModel)\n #qModel.summary()\n print(FILE_NAME + \" Loaded =)\")\n else:\n print(\"Reentrenando \"+ FILE_NAME)\n load_model(\"noexisto_nijamas_existire.hacheCinco\")\n except:\n # qModel = Sequential()\n # for Neurons in LAYERS[:-1]:\n # qModel.add(LSTM(Neurons, batch_input_shape=(BATCH_SIZE, TIMESTEP, f_len), stateful=True, return_sequences=True))\n # qModel.add(LSTM(LAYERS[-1], batch_input_shape=(BATCH_SIZE, TIMESTEP, f_len), stateful=True))\n # qModel.add(Dense( 1+len(QUANTILES) ))\n # #qModel.add(Dense( 1 ))\n # qModel.summary()\n # qModel.compile(loss=lambda y,f: quantil_loss(QUANTILES,y,f), optimizer='adam')\n # #qModel.compile(loss='mean_squared_error', optimizer='adam')\n # for i in range(EPOCHS):\n # print(\"%i/%i\"%(i+1,EPOCHS))\n # qModel.fit(trainX, trainY, epochs=1, batch_size=BATCH_SIZE, verbose=1, shuffle=False)\n # qModel.reset_states()\n # print(\"WWW\", trainY.shape)\n # print(\"WWW\", trainY)\n es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=PATIENCE)\n qModel = Sequential()\n if len(LAYERS) == 2:\n qModel.add(LSTM(LAYERS[1], activation=\"sigmoid\", input_shape=(TIMESTEP, f_len), return_sequences=True))\n qModel.add(Dropout(rate=DROP_RATE[1]))\n \n if TIMEDIST == True:\n pass\n else:\n qModel.add(LSTM(LAYERS[0], activation=\"sigmoid\", input_shape=(TIMESTEP, f_len), return_sequences=False))\n qModel.add(Dropout(rate=DROP_RATE[0]))\n qModel.add(Dense( y_len + len(QUANTILES), activation=\"linear\" ))\n \n #qModel.summary()\n 
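# wrap the custom loss in a lambda so QUANTILES and y_len are captured by\n            # closure; Keras only passes (y_true, y_pred) to the loss at train time\n            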
qModel.compile(loss=lambda y,f: LOSS(QUANTILES, y_len,len(QUANTILES),y,f), optimizer='adam')\n \n qModel.fit(trainX, trainY, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_data=(validX,validY), callbacks=[es], verbose=1)\n \n qModel.save(FILE_NAME)\n print(FILE_NAME + \" Saved =)\")\n \n list_models.append(qModel)\n \n return list_models, FILE_NAME\n\n\n# ## Qprediction\n# Función que realiza predicciones para el modelo entrenado mediante la función Qmodel. \n\n# In[ ]:\n\n\ndef Qprediction(internalConfig, Config):\n QUANTILES = Config[\"QUANTILES\"]\n BATCH_SIZE = Config[\"BATCH_SIZE\"]\n THETA = Config[\"THETA\"]\n moreTHETA = Config[\"moreTHETA\"]\n TIMEDIST = Config[\"TIMEDIST\"]\n FOLDS_TVT = Config[\"FOLDS_TVT\"]\n GRAPH = Config[\"GRAPH\"]\n #trainX = internalConfig[\"trainX\"]\n #trainY = internalConfig[\"trainY\"]\n #validX = internalConfig[\"validX\"]\n #validY = internalConfig[\"validY\"]\n #testX = internalConfig[\"testX\"]\n #testY = internalConfig[\"testY\"]\n Yscaler = internalConfig[\"scalers\"][\"Yscaler\"]\n h24scaler = internalConfig[\"scalers\"][\"h24scaler\"]\n list_models = internalConfig[\"list_models\"]\n \n \n #WW = Config[\"WW\"]\n \n #if WW != None:\n # qModel.set_weights(WW)\n \n if FOLDS_TVT == False:\n list_trainX = [ internalConfig[ \"trainX\"] ]\n list_trainY = [ internalConfig[ \"trainY\"] ]\n list_validX = [ internalConfig[ \"validX\"] ]\n list_validY = [ internalConfig[ \"validY\"] ]\n list_testX = [ internalConfig[ \"testX\" ] ]\n list_testY = [ internalConfig[ \"testY\" ] ]\n list_dateTrain = [ internalConfig[ \"dateTrain\"] ]\n list_dateValid = [ internalConfig[ \"dateValid\"] ]\n list_dateTest = [ internalConfig[ \"dateTest\"] ] \n \n else:\n list_trainX = internalConfig[ \"list_trainX\"]\n list_trainY = internalConfig[ \"list_trainY\"]\n list_validX = internalConfig[ \"list_validX\"]\n list_validY = internalConfig[ \"list_validY\"]\n list_testX = internalConfig[ \"list_testX\"]\n list_testY = internalConfig[ \"list_testY\"]\n list_dateTrain = internalConfig[\"list_dateTrain\"]\n list_dateValid = internalConfig[\"list_dateValid\"]\n list_dateTest = internalConfig[\"list_dateTest\"]\n \n # RMSE\n trainRMSE = []\n validRMSE = []\n testRMSE = []\n \n # MAE\n trainMAE = []\n validMAE = []\n testMAE = []\n \n scores_detail = {\n \"train\": {'quantity':[]},\n \"valid\": {'quantity':[]},\n \"test\" : {'quantity':[]},\n }\n \n # quantile metrics\n trainQmetricAll =[]\n validQmetricAll = []\n testQmetricAll = []\n trainQmetricAtTHETA =[]\n validQmetricAtTHETA = []\n testQmetricAtTHETA = []\n \n # Interval Coverage\n trainIC = []\n validIC = []\n testIC = []\n \n \n print(\"Qprediction ...\")\n for fold in range(0,len(list_trainX)):\n trainX = list_trainX[fold]\n trainY = list_trainY[fold]\n validX = list_validX[fold]\n validY = list_validY[fold]\n testX = list_testX[fold]\n testY = list_testY[fold]\n dateTrain = list_dateTrain[fold]\n dateValid = list_dateValid[fold]\n dateTest = list_dateTest[fold] \n \n qModel = list_models[fold]\n \n # Predictions\n \n qlen = len(QUANTILES)\n trainPred = qModel.predict(trainX)\n qModel.reset_states()\n validPred = qModel.predict(validX)\n qModel.reset_states()\n testPred = qModel.predict(testX)\n qModel.reset_states()\n \n if TIMEDIST == True:\n pass\n else:\n #print(\"trainPred.shape, \",trainPred.shape)\n #print(\"trainY.shape, \",trainY.shape)\n #print(\"validPred.shape, \",validPred.shape)\n #print(\"validY.shape, \",validY.shape)\n #print(\"testPred.shape, \",testPred.shape)\n #print(\"testY.shape, \",testY.shape)\n 
#Reduces shape to ( examples, y + QUANTILES). Discarding y0, y2, y-1, etc...\n trainPred = np.hstack( [trainPred[:,0,None], trainPred[:, -qlen:] ] )\n validPred = np.hstack( [validPred[:,0,None], validPred[:, -qlen:] ] )\n testPred = np.hstack( [testPred[:,0,None], testPred[:, -qlen:] ] )\n #trainY = np.hstack( [trainY[:,0,None], trainY[:, -qlen:] ] )\n #testY = np.hstack( [testY[:,0,None], testY[:, -qlen:] ] )\n #print(\"trainPred.shape, \",trainPred.shape)\n #print(\"trainY.shape, \",trainY.shape)\n #print(\"testPred.shape, \",testPred.shape)\n #print(\"testY.shape, \",testY.shape)\n \n \n # Inverse Transform\n finalTrainPredq = Yscaler.inverse_transform(trainPred[:,0, None])\n #print(trainY.shape)\n #print(trainY[:,0,None].shape)\n finalTrainY = Yscaler.inverse_transform(trainY)\n #print(finalTrainY.shape)\n finalValidPredq = Yscaler.inverse_transform(validPred[:,0, None])\n finalValidY = Yscaler.inverse_transform(validY)\n finalTestPredq = Yscaler.inverse_transform(testPred[:,0,None])\n finalTestY = Yscaler.inverse_transform(testY)\n \n \n # stats\n num_train = len(trainX)\n num_valid = len(validX)\n num_test = len(testX)\n \n num_THETA_train = np.sum(finalTrainY >= THETA)\n num_THETA_valid = np.sum(finalValidY >= THETA)\n num_THETA_test = np.sum(finalTestY >= THETA)\n \n scores_detail[\"train\"]['quantity'].append( (num_train, num_THETA_train) )\n scores_detail[\"valid\"]['quantity'].append( (num_valid, num_THETA_valid) )\n scores_detail[\"test\"]['quantity'].append( (num_test, num_THETA_test) )\n \n \n # calculate root mean squared error\n yt, yp = finalTrainY, finalTrainPredq\n scores = RMSE(yt, yp, THETA=THETA, ALL=True)\n scores = [scores[\"RMSE\"], scores[\"RMSPE\"], scores[\"RMSEat\"], scores[\"RMSPEat\"] ]\n for t in moreTHETA:\n scores.append( RMSE(yt, yp, THETA=t) )\n scores.append( RMSE(yt, yp, THETA=t, norm=True) )\n trainRMSE.append( scores )\n yt, yp = finalValidY, finalValidPredq\n scores = RMSE(yt, yp, THETA=THETA, ALL=True)\n scores = [scores[\"RMSE\"], scores[\"RMSPE\"], scores[\"RMSEat\"], scores[\"RMSPEat\"] ]\n for t in moreTHETA:\n scores.append( RMSE(yt, yp, THETA=t) )\n scores.append( RMSE(yt, yp, THETA=t, norm=True) )\n validRMSE.append( scores )\n yt, yp = finalTestY, finalTestPredq\n scores = RMSE(yt, yp, THETA=THETA, ALL=True)\n scores = [scores[\"RMSE\"], scores[\"RMSPE\"], scores[\"RMSEat\"], scores[\"RMSPEat\"] ]\n for t in moreTHETA:\n scores.append( RMSE(yt, yp, THETA=t) )\n scores.append( RMSE(yt, yp, THETA=t, norm=True) )\n testRMSE.append( scores )\n a,b = trainRMSE[-1][0:2]\n c,d = validRMSE[-1][0:2]\n e,f = testRMSE[-1][0:2]\n print(\"fold %s score (RMSE,RMSPE): (%.2f, %.2f) (%.2f, %.2f) (%.2f, %.2f)\"%(fold, a,b,c,d,e,f))\n \n '''\n scores = RMSE(finalTrainY, finalTrainPredq, THETA=THETA, ALL=True)\n trainRMSE.append( [scores[\"RMSE\"], scores[\"RMSPE\"], scores[\"RMSEat\"], scores[\"RMSPEat\"]] )\n scores = RMSE(finalValidY, finalValidPredq, THETA=THETA, ALL=True)\n validRMSE.append( [scores[\"RMSE\"], scores[\"RMSPE\"], scores[\"RMSEat\"], scores[\"RMSPEat\"]] )\n scores = RMSE(finalTestY, finalTestPredq, THETA=THETA, ALL=True)\n testRMSE.append( [scores[\"RMSE\"], scores[\"RMSPE\"], scores[\"RMSEat\"], scores[\"RMSPEat\"]] )\n \n trainScore = RMSE(finalTrainY, finalTrainPredq, 61)\n trainRMSE[-1].append( trainScore )\n validScore = RMSE(finalValidY, finalValidPredq, 61)\n validRMSE[-1].append( validScore )\n testScore = RMSE(finalTestY, finalTestPredq, 61)\n testRMSE[-1].append( testScore )\n '''\n \n #MAE\n yt, yp = finalTrainY, 
finalTrainPredq\n scores = MAE(yt, yp, THETA=THETA, ALL=True)\n scores = [scores[\"MAE\"], scores[\"MAPE\"], scores[\"MAEat\"], scores[\"MAPEat\"] ]\n for t in moreTHETA:\n scores.append( MAE(yt, yp, THETA=t) )\n scores.append( MAE(yt, yp, THETA=t, norm=True) )\n trainMAE.append( scores )\n yt, yp = finalValidY, finalValidPredq\n scores = MAE(yt, yp, THETA=THETA, ALL=True)\n scores = [scores[\"MAE\"], scores[\"MAPE\"], scores[\"MAEat\"], scores[\"MAPEat\"] ]\n for t in moreTHETA:\n scores.append( MAE(yt, yp, THETA=t) )\n scores.append( MAE(yt, yp, THETA=t, norm=True) )\n validMAE.append( scores )\n yt, yp = finalTestY, finalTestPredq\n scores = MAE(yt, yp, THETA=THETA, ALL=True)\n scores = [scores[\"MAE\"], scores[\"MAPE\"], scores[\"MAEat\"], scores[\"MAPEat\"] ]\n for t in moreTHETA:\n scores.append( MAE(yt, yp, THETA=t) )\n scores.append( MAE(yt, yp, THETA=t, norm=True) )\n testMAE.append( scores )\n \n '''\n scores = MAE(finalTrainY, finalTrainPredq, THETA=THETA, ALL=True)\n trainMAE.append( [scores[\"MAE\"], scores[\"MAPE\"], scores[\"MAEat\"], scores[\"MAPEat\"]] )\n scores = MAE(finalValidY, finalValidPredq, THETA=THETA, ALL=True)\n validMAE.append( [scores[\"MAE\"], scores[\"MAPE\"], scores[\"MAEat\"], scores[\"MAPEat\"]] )\n scores = MAE(finalTestY, finalTestPredq, THETA=THETA, ALL=True)\n testMAE.append( [scores[\"MAE\"], scores[\"MAPE\"], scores[\"MAEat\"], scores[\"MAPEat\"]] )\n '''\n \n \n # quantile metric\n trainMean , trainQMlosses = quantile_metrics(finalTrainY, QUANTILES, Yscaler.inverse_transform(trainPred[:,1:]))\n trainQlosses = list(trainQMlosses) + [trainMean]\n trainQmetricAll.append( trainQlosses )\n validMean , validQMlosses = quantile_metrics(finalValidY, QUANTILES, Yscaler.inverse_transform(validPred[:,1:]))\n validQlosses = list(validQMlosses) + [validMean]\n validQmetricAll.append( validQlosses )\n testMean , testQMlosses = quantile_metrics(finalTestY, QUANTILES, Yscaler.inverse_transform(testPred[:,1:]))\n testQlosses = list(testQMlosses) + [testMean]\n testQmetricAll.append( testQlosses )\n \n # Interval Coverage\n bp = np.mean(Yscaler.inverse_transform(trainPred[:,0]))\n nominal_sig = 1.0 - (QUANTILES[-1] - QUANTILES[0])\n \n inf_limit = Yscaler.inverse_transform(trainPred[:,1])\n sup_limit = Yscaler.inverse_transform(trainPred[:,-1])\n MIS, MSIS, cov_prob, lenght = IC_metrics(inf_limit, sup_limit, nominal_sig, finalTrainY, bp)\n trainIC.append( [MIS, MSIS, cov_prob, lenght] )\n \n inf_limit = Yscaler.inverse_transform(validPred[:,1])\n sup_limit = Yscaler.inverse_transform(validPred[:,-1])\n MIS, MSIS, cov_prob, lenght = IC_metrics(inf_limit, sup_limit, nominal_sig, finalValidY, bp)\n validIC.append( [MIS, MSIS, cov_prob, lenght] )\n \n inf_limit = Yscaler.inverse_transform(testPred[:,1])\n sup_limit = Yscaler.inverse_transform(testPred[:,-1])\n MIS, MSIS, cov_prob, lenght = IC_metrics(inf_limit, sup_limit, nominal_sig, finalTestY, bp)\n testIC.append( [MIS, MSIS, cov_prob, lenght] )\n \n \n # Only for graphics\n if GRAPH == True:\n for i in range(len(QUANTILES)):\n temp = Yscaler.inverse_transform(trainPred[:,1+i,None])\n finalTrainPredq = np.hstack([finalTrainPredq, temp])\n #temp = Yscaler.inverse_transform(trainY[:,1+i,None])\n #finalTrainY =np.hstack([finalTrainY, temp])\n finalTrainY = Yscaler.inverse_transform(trainY)\n \n temp = Yscaler.inverse_transform(validPred[:,1+i,None])\n finalValidPredq = np.hstack([finalValidPredq, temp])\n finalValidY = Yscaler.inverse_transform(validY)\n \n temp = Yscaler.inverse_transform(testPred[:,1+i,None])\n 
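# same stacking for the test split: one inverse-scaled column per quantile\n                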
finalTestPredq = np.hstack([finalTestPredq, temp])\n #temp = Yscaler.inverse_transform(testY[:,1+i,None])\n #finalTestY = np.hstack([finalTestY, temp])\n finalTestY = Yscaler.inverse_transform(testY)\n \n internalConfig[\"trainPred\"] = finalTrainPredq\n internalConfig[\"trainYtrue\"] = finalTrainY\n internalConfig[\"validPred\"] = finalValidPredq\n internalConfig[\"validYtrue\"] = finalValidY\n internalConfig[\"testPred\"] = finalTestPredq\n internalConfig[\"testYtrue\"] = finalTestY\n internalConfig[\"last_dateTrain\"] = dateTrain\n internalConfig[\"last_dateValid\"] = dateValid\n internalConfig[\"last_dateTest\"] = dateTest\n \n graph_Qprediction(internalConfig, Config)\n \n \n \n \n #print(finalTrainPredq.shape)\n \n \n print(\"### LSTM CUANTÍLICA ###\")\n labels = [\"RMSE\", \"RMSPE\", \"RMSEat%s\"%THETA, \"RMSPEat%s\"%THETA]\n for t in moreTHETA:\n labels.append(\"RMSEat%s\"%t)\n labels.append(\"RMSPEat%s\"%t)\n maxlen = max(map(len,labels))\n trainMeans = np.nanmean(trainRMSE, axis=0)\n validMeans = np.nanmean(validRMSE, axis=0)\n testMeans = np.nanmean(testRMSE, axis=0)\n print(\"{:>{maxlen}}{:>8}{:>8}{:>8}\".format(\"\",\"Train\",\"Valid\",\"Test\",maxlen=maxlen))\n for i in range(len(labels)):\n print( (\"{0:>{maxlen}}{1:8.2f}{2:8.2f}{3:8.2f}\".format (labels[i], trainMeans[i], validMeans[i], testMeans[i], maxlen=maxlen)) )\n \n scores_detail[\"train\"][labels[i]] = np.array(trainRMSE)[:,i]\n scores_detail[\"valid\"][labels[i]] = np.array(validRMSE)[:,i]\n scores_detail[\"test\"][labels[i]] = np.array(testRMSE)[:,i]\n \n print(\"\\n\")\n '''\n labels = [\"RMSE\", \"RMSPE\", \"RMSEat%s\"%THETA, \"RMSPEat%s\"%THETA, \"RMSEat61\"]\n maxlen = max(map(len,labels))\n trainMeans = np.nanmean(trainRMSE, axis=0)\n validMeans = np.nanmean(validRMSE, axis=0)\n testMeans = np.nanmean(testRMSE, axis=0)\n print(\"{:>{maxlen}}{:>8}{:>8}{:>8}\".format(\"\",\"Train\",\"Valid\",\"Test\",maxlen=maxlen))\n for i in range(len(labels)):\n print( (\"{0:>{maxlen}}{1:8.2f}{2:8.2f}{3:8.2f}\".format (labels[i], trainMeans[i], validMeans[i], testMeans[i], maxlen=maxlen)) )\n print(\"\\n\")\n '''\n \n labels = [\"MAE\", \"MAPE\", \"MAEat%s\"%THETA, \"MAPEat%s\"%THETA]\n for t in moreTHETA:\n labels.append(\"RMSEat%s\"%t)\n labels.append(\"RMSPEat%s\"%t)\n maxlen = max(map(len,labels))\n trainMeans = np.nanmean(trainMAE, axis=0)\n validMeans = np.nanmean(validMAE, axis=0)\n testMeans = np.nanmean( testMAE, axis=0)\n print(\"{:>{maxlen}}{:>8}{:>8}{:>8}\".format(\"\",\"Train\",\"Valid\",\"Test\",maxlen=maxlen))\n for i in range(len(labels)):\n print( (\"{0:>{maxlen}}{1:8.2f}{2:8.2f}{3:8.2f}\".format (labels[i], trainMeans[i], validMeans[i], testMeans[i], maxlen=maxlen)) )\n \n scores_detail[\"train\"][labels[i]] = np.array(trainMAE)[:,i]\n scores_detail[\"valid\"][labels[i]] = np.array(validMAE)[:,i]\n scores_detail[\"test\"][labels[i]] = np.array(testMAE)[:,i]\n print(\"\\n\")\n \n '''\n labels = [\"MAE\", \"MAPE\", \"MAEat%s\"%THETA, \"MAPEat%s\"%THETA]\n maxlen = max(map(len,labels))\n trainMeans = np.nanmean(trainMAE, axis=0)\n validMeans = np.nanmean(validMAE, axis=0)\n testMeans = np.nanmean( testMAE, axis=0)\n print(\"{:>{maxlen}}{:>8}{:>8}{:>8}\".format(\"\",\"Train\",\"Valid\",\"Test\",maxlen=maxlen))\n for i in range(len(labels)):\n print( (\"{0:>{maxlen}}{1:8.2f}{2:8.2f}{3:8.2f}\".format (labels[i], trainMeans[i], validMeans[i], testMeans[i], maxlen=maxlen)) )\n print(\"\\n\")\n '''\n \n print(\"Quantile Metric\")\n labels = QUANTILES + [\"mean\"]\n trainMeans = np.nanmean(trainQmetricAll,axis=0)\n 
validMeans = np.nanmean(validQmetricAll,axis=0)\n    testMeans = np.nanmean(testQmetricAll,axis=0)\n    print(\"\\t\\tTrain\\tValid\\tTest\")\n    for i in range(len(labels)):\n        print(\"\\t%s\\t%.2f\\t%.2f\\t%.2f\" % (labels[i],trainMeans[i],validMeans[i],testMeans[i]))\n    print(\"\\n\")\n    \n    labels = [\"MIS\", \"MSIS\", \"cov_prob\", \"length\"]\n    trainMeans = np.nanmean(trainIC,axis=0)\n    validMeans = np.nanmean(validIC,axis=0)\n    testMeans = np.nanmean(testIC ,axis=0)\n    print(\"{:>{maxlen}}{:>8}{:>8}{:>8}\".format(\"\",\"Train\",\"Valid\",\"Test\",maxlen=maxlen))\n    for i in range(len(labels)):\n        print( (\"{0:>{maxlen}}{1:8.2f}{2:8.2f}{3:8.2f}\".format (labels[i], trainMeans[i], validMeans[i], testMeans[i], maxlen=maxlen)) )\n    \n    #print(\"finalTrainPredq.shape, \",finalTrainPredq.shape)\n    #print(\"finalTrainY.shape, \",finalTrainY.shape)\n    #print(\"finalTestPredq.shape, \",finalTestPredq.shape)\n    #print(\"finalTestY.shape, \",finalTestY.shape)\n    #return (finalTrainPredq, finalTrainY, finalValidPredq, finalValidY, finalTestPredq, finalTestY)\n    return scores_detail\n\n\n# ## graph_Qprediction\n# Plots the predictions produced by the Qprediction function.\n\n# In[ ]:\n\n\ndef graph_Qprediction(internalConfig, Config):\n    QUANTILES = Config[\"QUANTILES\"]\n    THETA = Config[\"THETA\"]\n    \n    finalTrainPredq = internalConfig[\"trainPred\"]\n    finalTrainY = internalConfig[\"trainYtrue\"]\n    finalValidPredq = internalConfig[\"validPred\"]\n    finalValidY = internalConfig[\"validYtrue\"]\n    finalTestPredq = internalConfig[\"testPred\"]\n    finalTestY = internalConfig[\"testYtrue\"]\n    dateTrain = internalConfig[\"last_dateTrain\"]\n    dateValid = internalConfig[\"last_dateValid\"]\n    dateTest = internalConfig[\"last_dateTest\"]\n    date_index = internalConfig[\"dateExamples\"]\n    \n    file_name = internalConfig[\"file_name\"]\n    dataset = internalConfig[\"complete_dataset\"]\n    Yscaler = internalConfig[\"scalers\"][\"Yscaler\"]\n    \n    \n    # Graph quantile TrainPred\n    #forPlot = np.hstack([dateTrain[:,None], finalTrainY[:,0,None] ])\n    \n    print(dateTrain[:,None].shape)\n    print(finalTrainY.shape)\n    \n    print(\"first dateTrain:\", dateTrain[0])\n    print(\"last dateTrain:\", dateTrain[-1])\n    print(\"first dateValid:\", dateValid[0])\n    print(\"last dateValid:\", dateValid[-1])\n    print(\"first dateTest:\", dateTest[0])\n    print(\"last dateTest:\", dateTest[-1])\n    \n    forPlot = np.hstack([dateTrain[:,None], finalTrainY ])\n    for c in finalTrainPredq.T[:,:,None]:\n        forPlot = np.hstack([forPlot, c])\n    #print(\"forPLot.shape, \", forPlot.shape)\n    df = pd.DataFrame(forPlot)\n    df.columns = [\"fecha\", \"y\", \"f\"] + list( map(str,QUANTILES) )\n    df = df.set_index(\"fecha\")\n    #print(df.asfreq(\"D\").iplot(title=FILE_NAME))\n    \n    #'''\n    # Graph quantile validPred\n    forPlot = np.hstack([dateValid[:,None], finalValidY ])\n    for c in finalValidPredq.T[:,:,None]:\n        forPlot = np.hstack([forPlot, c])\n    dv = pd.DataFrame(forPlot)\n    dv.columns = [\"fecha\", \"y\", \"f\"] + list( map(str,QUANTILES) )\n    dv = dv.set_index(\"fecha\")\n    #'''\n    \n    # Graph quantile TestPred\n    #forPlot = np.hstack([dateTest[:,None], finalTestY[:,0,None] ])\n    forPlot = np.hstack([dateTest[:,None], finalTestY ])\n    for c in finalTestPredq.T[:,:,None]:\n        forPlot = np.hstack([forPlot, c])\n    #print(\"forPLot.shape, \", forPlot.shape)\n    dd = pd.DataFrame(forPlot)\n    dd.columns = [\"fecha\", \"y\", \"f\"] + list( map(str,QUANTILES) )\n    dd = dd.set_index(\"fecha\")\n    #print(dd.asfreq(\"D\").iplot(title=FILE_NAME))\n    #print(dd.iplot(title=FILE_NAME))\n\n    #cc = pd.concat([df,dd], 
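graph_Qprediction renders the quantile bands with cufflinks' iplot on a DataFrame that holds the true series plus one column per quantile. The same fan-chart idea with plain matplotlib, as a self-contained sketch (the toy data and the symmetric pairing of quantile columns are assumptions):

```python
import numpy as np
import matplotlib.pyplot as plt

def fan_chart_sketch(x, y_true, q_preds, quantiles):
    """q_preds: (n, len(quantiles)) array, columns sorted by quantile level."""
    fig, ax = plt.subplots()
    # shade symmetric quantile pairs from the outside in
    for lo in range(len(quantiles) // 2):
        hi = len(quantiles) - 1 - lo
        ax.fill_between(x, q_preds[:, lo], q_preds[:, hi], alpha=0.15,
                        label="%.0f%%-%.0f%%" % (100 * quantiles[lo], 100 * quantiles[hi]))
    ax.plot(x, y_true, color="black", lw=1, label="y")
    ax.legend()
    return ax

x = np.arange(50)
y = np.sin(x / 5.0) * 10 + 60
qs = [0.1, 0.3, 0.5, 0.7, 0.9]
preds = np.stack([y + 10 * (q - 0.5) for q in qs], axis=1)  # toy bands around y
fan_chart_sketch(x, y, preds, qs)
plt.show()
```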
axis=0).drop(\"y\",axis=1)\n cc = pd.concat([df,dv,dd], axis=0).drop(\"y\",axis=1)\n i = date_index[0] + np.timedelta64(-1,\"D\")\n f = date_index[-1] + np.timedelta64(-1,\"D\")\n\n allY = dataset[\"y\"][i:f]\n allY.index = allY.index + np.timedelta64(1,\"D\")\n allY = pd.DataFrame(Yscaler.inverse_transform(allY[:,None]), index=allY.index)\n allY.columns = [\"y\"]\n\n hh = pd.concat([allY,cc], axis= 1)\n\n hh.iplot(title=file_name, vline=[dateValid[0],dateTest[0]], hline=[THETA])\n return hh\n\n\n# ## quantileLSTM\n# Llama a las otras funciones para entrenar un modelo, realizar predicciones y graficar. \n# Devuelve el modelo entrenado y un DataFrame con las predicciones para graficar directamente.\n\n# In[ ]:\n\n\ndef quantileLSTM(Config):#, AGREGADOS, TARGET, THETA, FUTURE, PAST, FEATURES, CUT, BAN, TIMESTEP, OVERLAP, BATCH_SIZE, TRAINPCT, OVERWRITE_MODEL, MODEL_NAME, QUANTILES, LAYERS, EPOCHS, TIMEDIST):\n #np.random.seed(123)\n #set_random_seed(2)\n \n FOLDS_TVT = Config['FOLDS_TVT']\n TIMESTEP = Config['TIMESTEP']\n TIMEDIST = Config['TIMEDIST']\n Config['SHIFT'] = Config['FUTURE'] * -1\n \n #SHIFT = FUTURE*-1\n #PRECALC = precalcular_agregados()\n #dataset, ylabels, Yscaler, h24scaler = import_merge_and_scale(AGREGADOS,TARGET, THETA, PRECALC, SHIFT, PAST)\n \n scalers = {}\n ic = {\"scalers\": scalers}\n \n complete_dataset, ylabels, Yscaler, h24scaler = import_merge_and_scale(Config, verbose=False)\n ic[\"complete_dataset\"] = complete_dataset\n ic[\"ylabels\"] = ylabels\n scalers['Yscaler'] = Yscaler\n scalers[\"h24scaler\"] = h24scaler\n y_len = len(ic[\"ylabels\"])\n ic[\"y_len\"] = y_len\n \n data, features = select_features(ic, Config)\n ic[\"data\"] = data\n ic[\"features\"] = features\n ic[\"f_len\"] = len(ic[\"features\"])\n \n ic[\"secuencias\"] = obtener_secuencias(ic)\n \n examples, y_examples, dateExamples = make_examples(ic, Config, verbose=False)\n ic[\"examples\"] = examples\n ic[\"y_examples\"] = y_examples\n ic[\"dateExamples\"] = dateExamples\n \n if FOLDS_TVT == False:\n tvtDict = make_traintest(ic, Config, verbose=False)\n for key in tvtDict:\n ic[key] = tvtDict[key]\n \n else:\n list_tvtDict = make_folds_TVT(ic, Config)\n for key in list_tvtDict:\n ic[key] = list_tvtDict[key]\n \n #trainYq, testYq = make_qY(ic, Config)\n #ic[\"trainYq\"] = trainYq\n #ic[\"validYq\"] = validYq\n #ic[\"testYq\"] = testYq\n\n \n \n #trainYq = trainYq#.reshape(-1,5)\n #print(\"trainYq.shape, \", trainYq.shape)\n list_models, file_name = Qmodel(ic, Config)\n if FOLDS_TVT == False:\n ic[\"model\"] = list_models[0]\n ic[\"list_models\"] = [ ic[\"model\"] ]\n else:\n ic[\"list_models\"] = list_models\n \n ic[\"file_name\"] = file_name\n \n \n ##ftrp-> TRaining Prediction\n ##ftry-> TRaining Y\n ##ftep-> TEst Prediction\n ##ftey-> TEst Y\n #ftrp, ftry, fvap, fvay, ftep, ftey = Qprediction(ic, Config)\n #ic[\"trainPred\"] = ftrp\n #ic[\"trainYtrue\"] = ftry\n #ic[\"validPred\"] = fvap\n #ic[\"validYtrue\"] = fvay\n #ic[\"testPred\"] = ftep\n #ic[\"testYtrue\"] = ftey\n \n detail = Qprediction(ic, Config)\n \n #if FOLDS_TVT == False:\n #Qdf = graph_Qprediction(ic, Config)\n #ic[\"df\"] = Qdf\n \n #scalers = {\"Yscaler\":Yscaler,\"h24scaler\":h24scaler}\n #d = {\"data\":data, \"trainX\":trainX, \"trainY\":trainY, \"testX\":testX, \"testY\":testY, \"dateTrain\":dateTrain, \"dateTest\":dateTest,\n # \"trainPred\":ftrp, \"trainYtrue\":ftry, \"testPred\":ftep, \"testYtrue\":ftey,\n # \"modelName\":MODEL_NAME, \"scalers\":scalers}\n return ic\n\n\n# ## Pruebas\n\n# In[ ]:\n\n\nSTATION, 
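The configs below hand `quantil_loss` (defined earlier in the notebook) to the model as `QLOSS`. Its body is not visible in this section; a common Keras formulation of a pinball loss for a head that emits the mean in column 0 and one column per quantile, which is only a hedged sketch of that convention and not the notebook's actual function, would be:

```python
import tensorflow.keras.backend as K

def pinball_loss_sketch(quantiles):
    """Mean pinball loss over quantile columns 1..n plus MSE on the mean column.
    The (y, q1..qn) output layout mirrors the code above; this is an assumption,
    not the notebook's actual quantil_loss."""
    def loss(y_true, y_pred):
        total = K.mean(K.square(y_true[:, 0] - y_pred[:, 0]))  # mean head
        for j, q in enumerate(quantiles):
            e = y_true[:, 0] - y_pred[:, j + 1]
            total += K.mean(K.maximum(q * e, (q - 1.0) * e))   # pinball term
        return total
    return loss
```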
FILTER_YEARS, THETA = get_station(\"Parque_OHiggins\")\nqConfig = {\n    \"STATION\" : STATION,\n    \"SCALER\" : preprocessing.StandardScaler,\n    \"IMPUTATION\" : None,\n    \"AGREGADOS\":[],\n    \"TARGET\": \"O3\",\n    \"PRECALC\" : precalcular_agregados(STATION),\n    \"THETA\": THETA,\n    \"moreTHETA\" : [],\n    \"FUTURE\": 1, #must be 1\n    \"PAST\": False, #must be False\n    \n    \"FILTER_YEARS\" : FILTER_YEARS,\n    \"CUT\" : 0.41,\n    \"FIXED_FEATURES\" : ['CO', 'PM10', 'PM25', 'NO', 'NOX', 'WD', 'RH', 'TEMP', 'WS', 'UVA', 'UVB', 'O3'], # empty list means that the CUT will be used\n    \"BAN\" : [\"countEC\", \"EC\",\"O3btTHETA\"],\n    \n    \"FOLDS_TVT\" : True,\n    \"TIMESTEP\" : 7,\n    \"OVERLAP\": True,\n    \n    \"SHUFFLE\" : False,\n    \"TRAINPCT\": 0.85,\n    \n    \"OVERWRITE_MODEL\": False,\n    \"MODEL_NAME\" : \"qModel\",\n    \"QUANTILES\": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],\n    \"LAYERS\" : [9],\n    \"DROP_RATE\": [0.376241262719619],\n    \"QLOSS\" : quantil_loss,\n    \"BATCH_SIZE\": 16,\n    \"EPOCHS\" : 400,\n    \"PATIENCE\" : 20,\n    \"GRAPH\" : True,\n    \"TIMEDIST\" : False, #Not implemented\n    \n    }\n#Qoutput = quantileLSTM(qConfig)\n\n\n# In[ ]:\n\n\n#qConfig[\"GRAPH\"] = False\n#temp = Qprediction(Qoutput, qConfig)\n\n\n# ## Tests with several seeds - preSQP\n\n# In[ ]:\n\n\nSTATION, FILTER_YEARS, THETA = get_station(\"CONDES 4F\")\nqConfig = {\n    \"STATION\" : STATION,\n    \"SCALER\" : preprocessing.StandardScaler,\n    \"IMPUTATION\" : None,\n    \"AGREGADOS\":[],\n    \"TARGET\": \"O3\",\n    \"PRECALC\" : precalcular_agregados(STATION),\n    \"THETA\": THETA,\n    \"moreTHETA\" : [],\n    \"FUTURE\": 1, #must be 1\n    \"PAST\": False, #must be False\n    \n    \"FILTER_YEARS\" : FILTER_YEARS,\n    \"CUT\" : 0.41,\n    \"FIXED_FEATURES\" : ['CO', 'PM10', 'PM25', 'NO', 'NOX', 'WD', 'RH', 'TEMP', 'WS', 'UVA', 'UVB', 'O3'], # empty list means that the CUT will be used\n    \"BAN\" : [\"countEC\", \"EC\",\"O3btTHETA\"],\n    \n    \"FOLDS_TVT\" : True,\n    \"TIMESTEP\" : 2,\n    \"OVERLAP\": True,\n    \n    \"SHUFFLE\" : False,\n    \"TRAINPCT\": 0.85,\n    \n    \"OVERWRITE_MODEL\": True,\n    \"MODEL_NAME\" : \"qModel\",\n    \"QUANTILES\": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],\n    \"LAYERS\" : [3],\n    \"DROP_RATE\": [0.0687025004823643],\n    \"QLOSS\" : quantil_loss,\n    \"BATCH_SIZE\": 16,\n    \"EPOCHS\" : 400,\n    \"PATIENCE\" : 20,\n    \"GRAPH\" : False,\n    \"TIMEDIST\" : False, #Not implemented\n    \n    }\n'''\nseeds = [123, 57, 872, 340, 77, 583, 101, 178, 938, 555]\nall_scoresQ = []\nall_outputsQ = []\nfor s in seeds:\n    qConfig[\"SEED\"] = s\n    Qoutput = quantileLSTM(qConfig)\n    all_outputsQ.append(Qoutput)\n    all_scoresQ.append( Qprediction(Qoutput, qConfig) )\n'''\n\n\n# In[ ]:\n\n\n'''\nall_metrics = [\"RMSE\", \"RMSEat%s\"%THETA]\nfor m in all_metrics:\n    for d in ['train', 'valid', 'test']:\n        fmeans = []\n        for i in range( len(seeds) ):\n            fmeans.append( np.nanmean(all_scoresQ[i][d][m]) ) #mean over folds\n        mean = np.mean(fmeans)\n        std = np.std(fmeans)\n        print(m, d, mean, std)\n'''\n\n\n# In[ ]:\n\n\n#STOP\n\n\n# # Selector Network\n\n# ## classify\n# Gets the classes and the marks associated with each example. Takes a prediction from the quantile model and the associated Y values, both in the format returned by the Qprediction function.\n\n# In[ ]:\n\n\ndef classify(QPRED, QYTRUE, ):\n    classY = []\n    classMark = []\n    for i in range(len(QPRED)):\n        # in qpred ----> q1 < q2 < ... < qn\n        #qpred = QPRED[i, 1:][::-1]\n        qpred = QPRED[i, 1:]\n        ypred = QPRED[i, 0]\n        ytrue = QYTRUE[i, 0]\n        newy = []\n        newmk = []\n        classified = False\n\n        crossing = False\n        for q1,q2 in zip(qpred[:-1], qpred[1:]):\n            #if ypred < q1 and ypred >= q2:\n            if ypred > q1 and ypred <= q2:\n                newmk.append( ypred )\n            else:\n                newmk.append( np.mean( (q1,q2) ) )\n            \n            #if q2 > q1:\n            if q1 > q2:\n                crossing = True\n                print(\"warning: crossed quantiles, q1 > q2\")\n                print(qpred, ytrue)\n                print(i,q2,q1)\n            \n            if classified != True:\n                #if ytrue < q1 and ytrue >= q2:\n                if ytrue > q1 and ytrue <= q2:\n                    newy.append(1)\n                    classified = True\n                else:\n                    newy.append(0)\n            else:\n                newy.append(0)\n        \n        if classified == False:\n            #if ytrue >= qpred[0]:\n            if ytrue <= qpred[0]:\n                newy[0] = 1\n                #print(\"BELOW LOWEST QUANTILE\")\n            #elif ytrue < qpred[-1]:\n            elif ytrue > qpred[-1]:\n                newy[-1] = 1\n                #print(\"ABOVE HIGHEST QUANTILE\")\n            else:\n                print(\"SHOULD NOT BE REACHED\")\n                return\n        \n        if crossing:\n            print(\"quantile crossing detected\")\n            #return\n            pass\n        \n        classY.append(newy)\n        classMark.append(newmk)\n        \n    classY = np.array(classY)\n    classMark = np.array(classMark)\n\n    return classY, classMark\n\n\n# ## make_newExamples\n# Takes the features listed in subFeatures, trims the timesteps, and reshapes the array so that the timesteps become features.\n\n# In[ ]:\n\n\ndef make_newExamples(examples, subFeatures, subTimeStep, d):\n    FEATURES = d[\"data\"].columns.tolist()\n    subIndex = []\n    for s in subFeatures:\n        subIndex.append( FEATURES.index(s) )\n    \n    newExamples = examples[:, -subTimeStep:, subIndex]\n    \n    a,b,c = newExamples.shape\n    newExamples = newExamples.reshape( (a,b*c) )\n    \n    #if len(examples) > len(d[\"testY\"]):\n    #    print(examples.shape)\n    #    print(newExamples.shape)\n    #    print(d[\"trainY\"].shape)\n    #    newExamples = np.hstack([newExamples, d[\"trainY\"][:, -subTimeStep:, 0]])\n    #    \n    #    \n    #else:\n    #    newExamples = np.hstack([newExamples, d[\"testY\"][:, -subTimeStep:, 0]])\n        \n    return newExamples\n    \n\n\n# ## make_classes_and_newExamples\n# Builds the classes for the training and test labels.\n\n# In[ ]:\n\n\ndef make_classes_and_newExamples(quantileOutput, selectConfig): #d, subFeatures, subTimeStep):\n    #d = quantileOutput\n    subFeatures = selectConfig[\"subFeatures\"]\n    subTimeStep = selectConfig[\"subTimeStep\"]\n    isLSTM = selectConfig[\"isLSTM\"]\n    F_LEN = len(subFeatures)\n    \n    \n    print(\"Making Classes and new Examples ...\")\n    # Each row contains: (y, q1, q2, ..., qn)\n    # where q1 < q2 < ... 
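classify above walks the quantile grid element by element. The same bin assignment and interval marks can be expressed in vectorized NumPy; this sketch uses np.searchsorted and clips out-of-range targets into the outermost bins, mirroring the fallback branches above (function name and layout are illustrative):

```python
import numpy as np

def classify_vectorized_sketch(qpred, ypred, ytrue):
    """qpred: (n, k) sorted quantile predictions; returns one-hot bins and marks."""
    n, k = qpred.shape
    mid = (qpred[:, :-1] + qpred[:, 1:]) / 2.0          # default mark: bin midpoint
    # use the point forecast as the mark of whichever bin it falls into
    inside = (ypred[:, None] > qpred[:, :-1]) & (ypred[:, None] <= qpred[:, 1:])
    marks = np.where(inside, ypred[:, None], mid)
    # bin index of y_true among the k-1 inter-quantile intervals, clipped at the ends
    idx = np.array([np.searchsorted(qpred[i], ytrue[i]) - 1 for i in range(n)])
    idx = np.clip(idx, 0, k - 2)
    onehot = np.zeros((n, k - 1), dtype=int)
    onehot[np.arange(n), idx] = 1
    return onehot, marks
```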
< qn\n trainPred = quantileOutput[\"trainPred\"]\n trainYtrue = quantileOutput[\"trainYtrue\"]\n testPred = quantileOutput[\"testPred\"]\n testYtrue = quantileOutput[\"testYtrue\"]\n \n # new Y and classes\n print(\"trainPred.shape, \", trainPred.shape)\n print(\"testPred.shape, \", testPred.shape)\n classTrainY, classTrainMark = classify(trainPred, trainYtrue)\n classTestY, classTestMark = classify(testPred, testYtrue)\n print(\"classTrainMark, \",classTrainMark.shape)\n print(\"classTrainY.shape, \", classTrainY.shape)\n print(\"classTestY.shape, \", classTestY.shape)\n \n # new examples\n trainX = quantileOutput[\"trainX\"]\n testX = quantileOutput[\"testX\"]\n \n print(\"trainX.shape, \",trainX.shape)\n print(\"testX.shape, \",testX.shape)\n newTrainX = make_newExamples(trainX, subFeatures, subTimeStep,quantileOutput)\n newTestX = make_newExamples(testX, subFeatures, subTimeStep,quantileOutput)\n print(\"newTrainX.shape, \",newTrainX.shape)\n print(\"newTestX.shape, \",newTestX.shape)\n \n Yscaler = quantileOutput[\"scalers\"][\"Yscaler\"]\n \n for tr, te in zip(trainPred[:,0:1].T, testPred[:,0:1].T):\n newTrainX = np.hstack([newTrainX, Yscaler.transform( tr[:, None] ) ])\n newTestX = np.hstack([newTestX, Yscaler.transform( te[: ,None] ) ])\n print(\"newTrainX.shape, \",newTrainX.shape)\n print(\"newTestX.shape, \",newTestX.shape)\n \n if isLSTM == True:\n trainX = newTrainX[:,:-1].reshape( (-1, subTimeStep, F_LEN) )\n testX = newTestX[:,:-1].reshape( (-1, subTimeStep, F_LEN) )\n \n #### repeating mean preadiction from quantileLSTM\n ###temp = newTrainX[:,-1]\n ###temp = np.repeat(temp,subTimeStep).reshape(-1, subTimeStep, 1)\n ###trainX = np.concatenate([trainX,temp],axis=2)\n ###temp = newTestX[:,-1]\n ###temp = np.repeat(temp,subTimeStep).reshape(-1, subTimeStep, 1)\n ###testX = np.concatenate([testX,temp],axis=2)\n \n newTrainX = trainX\n newTestX = testX\n \n\n \n selectD = {\"trainY\":classTrainY, \"trainMarks\":classTrainMark, \"testY\":classTestY, \"testMarks\":classTestMark,\n \"trainX\":newTrainX, \"testX\":newTestX, \"quantileOutput\":quantileOutput, \"F_LEN\":F_LEN}\n \n return selectD\n \n \n \n \n \n\n'''\noutputForSelect = quantileLSTM(\n AGREGADOS=[],\n TARGET= \"O3\",\n FUTURE=1, #must be >= 1\n PAST=False, #must be False or >= 0\n \n FEATURES=[],\n CUT=0.26,\n BAN=[],\n \n TIMESTEP = 5,\n OVERLAP=True,\n \n BATCH_SIZE= 16,\n TRAINPCT=0.85,\n \n OVERWRITE_MODEL=False,\n MODEL_NAME = \"qModel\",\n QUANTILES=[0.1, 0.3, 0.5, 0.7, 0.9],\n LAYERS = [12],\n EPOCHS = 100,\n TIMEDIST = False, #Sin implementar\n \n )\n'''\n# ## SQPmodel\n\n# In[ ]:\n\n\ndef SQPmodel(selectConfig, quantileOutput):\n np.random.seed(123)\n set_random_seed(2)\n \n qfilename = quantileOutput[\"file_name\"]\n qLSTM = quantileOutput[\"model\"]\n dfFS = quantileOutput[\"df\"]\n dFS = quantileOutput\n \n subFeatures = selectConfig[\"subFeatures\"]\n subTimeStep = selectConfig[\"subTimeStep\"]\n LAYERS = selectConfig[\"LAYERS\"]\n DROP_RATE = selectConfig[\"DROP_RATE\"]\n EPOCHS = selectConfig[\"EPOCHS\"]\n OVERWRITE_MODEL = selectConfig[\"OVERWRITE_MODEL\"]\n # TIPO DE RED\n isLSTM = selectConfig[\"isLSTM\"]\n \n \n \n #selectD = make_classes_and_newExamples(dFS, subFeatures, subTimeStep)\n #** ic = internal Config **\n ic = make_classes_and_newExamples(quantileOutput, selectConfig)\n \n \n trainX = ic[\"trainX\"]\n trainY = ic[\"trainY\"]\n \n print(np.min(trainY))\n print(np.max(trainY))\n print(\"trainX.shape,\",trainX.shape)\n \n qfileHash = md5(qfilename.encode()).hexdigest()\n ic[\"qfileHash\"] = 
qfileHash\n FILE_NAME = create_filename(ic, selectConfig)\n \n try:\n print(\"Model File Name: \",FILE_NAME)\n if OVERWRITE_MODEL == False:\n print(\"Loading Model...\")\n model = load_model(FILE_NAME)\n print(FILE_NAME + \" Loaded =)\")\n else:\n print(\"Reentrenando \"+ FILE_NAME)\n load_model(\"noexisto_nijamas_existire.hacheCinco\")\n except:\n \n if isLSTM == True:\n #LAYERS=[4]\n #TIMESTEP = subTimeStep\n ###F_LEN = len(subFeatures)\n ###\n ###print(ic[\"trainX\"].shape)\n ###trainX = ic[\"trainX\"][:,:-1].reshape( (-1, subTimeStep, F_LEN) )\n ###temp = ic[\"trainX\"][:,-1]\n ###temp = np.repeat(temp,subTimeStep).reshape(-1, subTimeStep, 1)\n ###print(trainX.shape)\n ###print(temp.shape)\n ###trainX = np.concatenate([trainX,temp],axis=2)\n ###print(trainX.shape)\n ###print(trainY.shape)\n ###\n ###ic[\"trainX\"] = trainX\n \n model = Sequential()\n for Neurons in LAYERS[:-1]:\n model.add(LSTM(Neurons, input_shape=(subTimeStep, trainX.shape[2]), return_sequences=True))\n #model.add(Dropout(rate=DROP_RATE))\n model.add(LSTM(LAYERS[-1], input_shape=(subTimeStep, trainX.shape[2]), return_sequences=False))\n #model.add(Dropout(rate=DROP_RATE))\n model.add(Dense(trainY.shape[1] , activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n #model.fit( trainX, trainY, epochs=600, batch_size=16 )\n model.fit( trainX, trainY, epochs=EPOCHS, batch_size=16 )\n else:\n #LAYERS=[100,50,50]\n print(trainX.shape)\n print(trainY.shape)\n model = Sequential()\n #model.add(Dense(LAYERS[0], input_dim=trainX.shape[1], activation='relu'))\n #model.add(Dropout(rate=DROP_RATE))\n for Neurons in LAYERS[:-1]:\n model.add(Dense(Neurons, activation='relu'))\n model.add(Dropout(rate=DROP_RATE))\n #model.add(Dense(50, activation='relu'))\n #model.add(Dropout(rate=DROP_RATE))\n model.add(Dense(LAYERS[-1], activation='relu'))\n #model.add(Dropout(rate=DROP_RATE))\n model.add(Dense(trainY.shape[1] , activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n #model.fit( trainX, trainY, epochs=1000, batch_size=16 )\n model.fit( trainX, trainY, epochs=EPOCHS, batch_size=16 )\n \n model.save(FILE_NAME)\n print(FILE_NAME + \" Saved =)\")\n \n ic[\"model\"] = model\n return ic\n\n\n# ## SQPrediction\n\n# In[ ]:\n\n\ndef SQPrediction(internalConfig, selectConfig, quantileOutput):\n np.random.seed(123)\n set_random_seed(2)\n \n isLSTM = selectConfig[\"isLSTM\"]\n subTimeStep = selectConfig[\"subTimeStep\"]\n F_LEN = internalConfig[\"F_LEN\"]\n model = internalConfig[\"model\"]\n trainX = internalConfig[\"trainX\"]\n testX = internalConfig[\"testX\"]\n trainY = internalConfig[\"trainY\"]\n testY = internalConfig[\"testY\"]\n \n \n ###if isLSTM == True:\n ### print(\"trainX, \", internalConfig[\"trainX\"].shape)\n ### print(\"testX, \", internalConfig[\"testX\"].shape)\n ### testX = internalConfig[\"testX\"][:,:-1].reshape( (-1, subTimeStep, F_LEN) )\n ### # adding mean prediciton of quantileLSTM\n ### temp = internalConfig[\"testX\"][:,-1]\n ### print(\"temp, \", temp.shape)\n ### temp = np.repeat(temp,subTimeStep).reshape(-1, subTimeStep, 1)\n ### print(\"testX,\", testX.shape)\n ### print(\"temp, \", temp.shape)\n ### testX = np.concatenate([testX,temp],axis=2)\n ### print(testX.shape)\n ### \n ### \n ### \n ### #print(ic[\"trainX\"].shape)\n ### #trainX = ic[\"trainX\"][:,:-1].reshape( (-1, subTimeStep, F_LEN) )\n ### #temp = ic[\"trainX\"][:,-1]\n ### #temp = np.repeat(temp,subTimeStep).reshape(-1, subTimeStep, 
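SQPmodel above forces retraining by loading a file that is known not to exist and catching the exception. An explicit existence check is the more idiomatic cache pattern; the helper below is a hedged sketch with illustrative names, not the notebook's API (note that models saved with a custom loss may also need `custom_objects` on load):

```python
import os
from tensorflow.keras.models import load_model

def load_or_train_sketch(file_name, build_and_fit, overwrite=False):
    """Return a cached Keras model unless retraining was requested.
    build_and_fit is any callable returning a fitted model."""
    if not overwrite and os.path.exists(file_name):
        print("Loading Model...", file_name)
        return load_model(file_name)
    print("Retraining", file_name)
    model = build_and_fit()
    model.save(file_name)
    return model
```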
1)\n ### #print(trainX.shape)\n ### #print(temp.shape)\n ### #trainX = np.concatenate([trainX,temp],axis=2)\n ### #print(trainX.shape)\n ### \n ### \n ### internalConfig[\"testX\"] = testX\n ###else:\n ### testX = internalConfig[\"testX\"]\n \n testY = internalConfig[\"testY\"]\n \n caca = model.predict(trainX[0, None])\n print(caca)\n print(trainY[0])\n print(quantileOutput['trainPred'][0])\n a = quantileOutput['trainYq'][0,None]\n print(quantileOutput[\"scalers\"][\"Yscaler\"].inverse_transform(a))\n \n \n trainPred = model.predict(trainX)\n testPred = model.predict(testX)\n \n ctrainPred = trainPred\n #ctrainPred = []\n #for p in trainPred:\n # print(type(p.tolist()))\n # n = [0]*len(p)\n # n[ np.argmax(p) ] = 1\n # ctrainPred.append(n)\n print(\"###########\")\n print( np.sum(np.array(ctrainPred),axis=0) )\n print( np.sum(np.array(trainY),axis=0) )\n ctrainPred = np.sum( np.array(ctrainPred) * internalConfig[\"trainMarks\"], axis= 1 )[:, None]\n #print(ctrainPred)\n \n ctestPred = testPred\n #ctestPred = []\n #for p in testPred:\n # n = [0]*len(p)\n # n[ np.argmax(p) ] = 1\n # ctestPred.append(n)\n ctestPred = np.sum( np.array(ctestPred) * internalConfig[\"testMarks\"], axis= 1 )[:, None]\n \n #print(\"ctestPrede\")\n #print(ctestPred)\n \n trainYtrue = quantileOutput[\"trainYtrue\"][:,0, None]\n testYtrue = quantileOutput[\"testYtrue\"][:,0, None]\n \n \n print(\"### RED SELECTORA ###\")\n \n print(\"Calculando Error\")\n trainScore = math.sqrt(mean_squared_error(trainYtrue, ctrainPred))\n print(' Train Score: %.2f RMSE' % (trainScore))\n testScore = math.sqrt(mean_squared_error(testYtrue, ctestPred))\n print(' Test Score: %.2f RMSE' % (testScore))\n \n theta=61\n print(\"Calculando RMSEat%d\"%theta)\n trainScore = RMSEat(theta,trainYtrue, ctrainPred)\n print(' Train Score: %.2f RMSEat%d' % (trainScore,theta))\n testScore = RMSEat(theta,testYtrue, ctestPred)\n print(' Test Score: %.2f RMSEat%d' % (testScore, theta))\n \n internalConfig[\"trainPred\"] = ctrainPred\n internalConfig[\"testPred\"] = ctestPred\n \n return internalConfig\n\n\n# ## SQP\n\n# In[ ]:\n\n\ndef SQP(SQPconfig, quantileConfig):\n for cfg in quantileConfig:\n if cfg not in SQPconfig:\n SQPconfig[cfg] = quantileConfig[cfg]\n quantileOutput = quantileLSTM(quantileConfig)\n internalConfig = SQPmodel(SQPconfig, quantileOutput)\n SQPoutput = SQPrediction(internalConfig, SQPconfig, quantileOutput)\n \n return SQPoutput\n\n\n# ## Pruebas\n# Red cuantílica que alimentará a la selectora, es independite de la cuantílica de pruebas.\n'''\n# Red cuantílica que alimentará a la selectora, es independite de la cuantílica de pruebas.\nqConfigForSelect = { \n \"AGREGADOS\":[],\n \"TARGET\": \"O3\",\n \"PRECALC\": precalcular_agregados(),\n \"THETA\":61,\n \"FUTURE\":1, #must be >= 1\n \"PAST\":False, #must be False or >= 0\n \n \"FIXED_FEATURES\":[],\n \"CUT\":0.26,\n \"BAN\":[\"countEC\",\"EC\"],\n \n \"TIMESTEP\" : 5,\n \"OVERLAP\":True,\n \n \"BATCH_SIZE\": 16,\n \"TRAINPCT\":0.85,\n \n \"OVERWRITE_MODEL\":False,\n \"MODEL_NAME\" : \"qModel\",\n \"QUANTILES\":[0.1, 0.3, 0.5, 0.7, 0.9],\n \"LAYERS\" : [12],\n \"DROP_RATE\": 0.4,\n \"EPOCHS\" : 100,\n \"TIMEDIST\" : False, #Sin implementar\n }\n\noutputForSelect = quantileLSTM(qConfigForSelect)selectConfig = {\n \"subFeatures\": ['CO', 'PM10', 'PM25', 'NO', 'NOX', 'WD', 'RH', 'TEMP', 'WS', 'UVA', 'UVB','O3'], #d[\"data\"].columns.tolist()[:-6]#\n \"subTimeStep\": 5, #debe ser <= con el cual fue entrenada la red cuantilica\n \"isLSTM\": False,\n \"EPOCHS\":500,\n 
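SQPrediction collapses the selector's softmax output into a point forecast by weighting each interval mark with its predicted class probability. The core operation in isolation:

```python
import numpy as np

probs = np.array([[0.1, 0.7, 0.2]])       # softmax over 3 inter-quantile bins
marks = np.array([[55.0, 62.0, 70.0]])    # one representative value per bin
point_forecast = np.sum(probs * marks, axis=1)[:, None]
print(point_forecast)                     # [[62.9]], the probability-weighted mark
```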
\"DROP_RATE\":0.4,\n \"LAYERS\":[4],\n \"OVERWRITE_MODEL\": False,\n \"MODEL_NAME\": \"SQPmodel\",\n }\n\noutputSQP = SQPmodel(selectConfig, outputForSelect)\n# SQPoutput = SQPrediction(outputSQP, selectConfig, outputForSelect)\n### RED SELECTORA ### FFN\nCalculando Error\n Train Score: 16.18 RMSE\n Test Score: 14.91 RMSE\nCalculando RMSEat61\n Train Score: 14.32 RMSEat61\n Test Score: 12.86 RMSEat61\n \n model = Sequential()\n model.add(Dense(50, input_dim=inputdim, activation='relu'))\n model.add(Dropout(rate=0.4))\n model.add(Dense(25, activation='relu'))\n #model.add(Dropout(rate=0.4))\n #model.add(Dense(25, activation='relu'))\n model.add(Dense(trainY.shape[1] , activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n model.fit( trainX, trainY, epochs=500, batch_size=16 )### RED SELECTORA ### FFN 3Layers\nCalculando Error\n Train Score: 17.03 RMSE\n Test Score: 15.42 RMSE\nCalculando RMSEat61\n Train Score: 14.75 RMSEat61\n Test Score: 13.82 RMSEat61\n \n model = Sequential()\n model.add(Dense(100, input_dim=inputdim, activation='relu'))\n model.add(Dropout(rate=0.4))\n model.add(Dense(50, activation='relu'))\n model.add(Dropout(rate=0.4))\n model.add(Dense(50, activation='relu'))\n model.add(Dropout(rate=0.4))\n #model.add(Dense(25, activation='relu'))\n model.add(Dense(trainY.shape[1] , activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n model.fit( trainX, trainY, epochs=500, batch_size=16 )### RED SELECTORA ### LSTM\nCalculando Error\n Train Score: 18.45 RMSE\n Test Score: 15.22 RMSE\nCalculando RMSEat61\n Train Score: 15.83 RMSEat61\n Test Score: 12.47 RMSEat61\n \n LAYERS=[50, 25] # tambien con LAYERS=[3]\n model = Sequential()\n for Neurons in LAYERS[:-1]:\n model.add(LSTM(Neurons, input_shape=(TIMESTEP, trainX.shape[2]), return_sequences=True))\n model.add(Dropout(rate=0.4))\n model.add(LSTM(LAYERS[-1], input_shape=(TIMESTEP, trainX.shape[2]), return_sequences=False))\n model.add(Dropout(rate=0.4))\n model.add(Dense(trainY.shape[1] , activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n model.fit( trainX, trainY, epochs=150, batch_size=16 )\n'''\n# In[ ]:\n\n\n# Red cuantílica que alimentará a la selectora, es independite de la cuantílica de pruebas.\nSTATION = \"Las_Condes\"\nquantileConfig = { \n \"SCALER\" : preprocessing.StandardScaler,\n \"AGREGADOS\":[],\n \"TARGET\": \"O3\",\n \"PRECALC\": precalcular_agregados(STATION),\n \"THETA\":61,\n \"FUTURE\":1, #must be >= 1\n \"PAST\":False, #must be False or >= 0\n \n \"FILTER_YEARS\" : [2004,2014],\n \"CUT\" : 0.41,\n \"FIXED_FEATURES\" : ['CO', 'PM10', 'PM25', 'NO', 'NOX', 'WD', 'RH', 'TEMP', 'WS', 'UVA', 'UVB', 'O3'], # empty list means that the CUT will be used\n \"BAN\" : [\"countEC\", \"EC\",\"O3btTHETA\"],\n \n \"TIMESTEP\" : 28,\n \"OVERLAP\":True,\n \n \"SHUFFLE\" : True,\n \"BATCH_SIZE\": 16,\n \"TRAINPCT\":0.85,\n \n \"OVERWRITE_MODEL\":False,\n \"MODEL_NAME\" : \"qModel\",\n \"QUANTILES\":[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],\n \"LAYERS\" : [25,32], \n \"DROP_RATE\": [0.0151298677510781, 0.0485420356560454],\n \n \"EPOCHS\" : 400,\n \"PATIENCE\" : 20,\n \"TIMEDIST\" : False, #Sin implementar\n }\n\nSQPconfig = {\n \"subFeatures\": ['CO', 'PM10', 'PM25', 'NO', 'NOX', 'WD', 'RH', 'TEMP', 'WS', 'UVA', 'UVB','O3'], #d[\"data\"].columns.tolist()[:-6]#\n \"subTimeStep\": 14, #debe ser <= con el cual fue entrenada la red cuantilica\n 
\"isLSTM\": False,\n \"EPOCHS\":150,\n \"DROP_RATE\":0.4,\n \"LAYERS\":[4],\n \n \"OVERWRITE_MODEL\": False,\n \"MODEL_NAME\": \"SQPmodel\",\n }\n\n#SQPoutput = SQP(SQPconfig,quantileConfig)\n\n\n# In[ ]:\n\n\n#dFS = SQPoutput[\"quantileOutput\"]\n#dfFS = SQPoutput[\"quantileOutput\"][\"df\"]\n#ctrainPred = SQPoutput[\"trainPred\"]\n#ctestPred = SQPoutput[\"testPred\"]\n\n#selectiveX = join_index(dFS[\"dateTrain\"], ctrainPred, \"trainSelect\")\n#selectiveY = join_index(dFS[\"dateTest\"], ctestPred, \"testSelect\")\n#tempSelect = join_index( np.vstack([dFS[\"dateTrain\"][:, None],dFS[\"dateTest\"][:, None]]),\n# np.vstack([ctrainPred,ctestPred]), \"X+Y\" )\n#tempSelect2 = join_index( dFS[\"dateTest\"][:, None], ctestPred, \"OnlyTest\")\n#toPlot = pd.concat([dfFS[[\"y\",\"f\"]], selectiveX, selectiveY, tempSelect, tempSelect2], axis=1)\n#toPlot[[\"y\",\"f\",\"trainSelect\", \"testSelect\"] ].iplot(title=\"Comparaciones\")\n\n\n# In[ ]:\n\n\n#temp = pd.DataFrame(np.abs(toPlot[\"y\"] - toPlot[\"f\"]))\n#temp.columns = [\"diff f %.3f\"%temp[[0]].mean()[0] ]\n#\n#temp2 = pd.DataFrame(np.abs(toPlot[\"y\"] - toPlot[\"OnlyTest\"] ))\n##print(temp2)\n#temp2.columns = [\"diff selective %.3f \"%temp2[[0]].mean()[0] ]\n##print(temp2)\n#\n##print(dfFS[\"y\"])\n##print(tempSelect)\n#\n#diffPlot = pd.concat([temp, temp2],axis=1)\n#temp = pd.DataFrame(diffPlot)\n#diffPlot = diffPlot.dropna()\n#\n##diffPlot.iplot(title=\"Diferencias hacia el Y real\")\n#\n#a = diffPlot.min(axis=1)\n#b = diffPlot[ diffPlot.columns[0] ]\n#b[b != a] = 0\n#b[b == a] = -10\n#diffPlot[ diffPlot.columns[0] ] = b\n#\n#b = diffPlot[ diffPlot.columns[1] ]\n#b[b != a] = 0\n#b[b == a] = 10\n#diffPlot[ diffPlot.columns[1] ] = b\n#\n#diffPlot.iplot(kind=\"bar\", mode=\"stack-bar\")\n#print(\"Cantidad de veces que predice mejor una sobre la otra\")\n#print(\"f, \", abs(diffPlot.sum()/10)[0])\n#print(\"red selectora,\",abs(diffPlot.sum()/10)[1])\n\n\n# In[ ]:\n\n\n#t = temp.values\n#np.sum(t[:,0] == t[:,1])\n\n\n# # Hyperas\n\n# In[ ]:\n\n\nfrom hyperopt import Trials, STATUS_OK, tpe\nfrom hyperas import optim\nfrom hyperas.distributions import choice, uniform, randint, quniform\nimport pickle \n\n\n# ## data\n\n# In[ ]:\n\n\ndef data():\n #STATION, FILTER_YEARS, THETA = \"Las_Condes\", [2004, 2013], 89\n #STATION, FILTER_YEARS, THETA = \"Independencia\", [2009, 2017], 56\n #STATION, FILTER_YEARS, THETA = \"Parque_OHiggins\", [2009, 2017], 56\n \n #STATION, FILTER_YEARS, THETA = \"Las_Condes\", [2008, 2013], 84#[2004, 2013], 89\n #STATION, FILTER_YEARS, THETA = \"Independencia\", [2009, 2014], 58#[2009, 2017], 56\n #STATION, FILTER_YEARS, THETA = \"Parque_OHiggins\", [2009, 2014], 60\n STATION, FILTER_YEARS, THETA = get_station(\"POH_full\")\n Config = {\n # import_merge_and_scale()\n \"STATION\" : STATION,\n \"SCALER\" : preprocessing.StandardScaler,\n \"AGREGADOS\" : [],#[\"O3\",\"TEMP\",\"WS\",\"RH\"], #[\"ALL\"] #Horas de los maximos que se quieren agregar.\n \"IMPUTATION\" : None,\n \"TARGET\" : \"O3\",\n \"PRECALC\" : [],#precalcular_agregados(),\n \"THETA\" : THETA,\n \"moreTHETA\" : [],\n \"FUTURE\" : 1,\n \"PAST\" : False,\n \"FILTER_YEARS\" : FILTER_YEARS,\n \n # select_features()\n \"CUT\" : 0.41, #relacionado con FILTER_YEARS\n \"FIXED_FEATURES\" : ['CO', 'PM10', 'PM25', 'NO', 'NOX', 'WD', 'RH', 'TEMP', 'WS', 'UVA', 'UVB', 'O3'], # empty list means that the CUT will be used\n \"BAN\" : [\"countEC\", \"EC\",\"O3btTHETA\"], # []\n \n #make_examples()\n \"FOLDS_TVT\" : True,\n \"OVERLAP\" : True,\n \n \"GRAPH\" : False,\n \"Yx\" 
: 0 # DEFAULT 0\n }\n \n np.random.seed(123)\n Config['SHIFT'] = Config['FUTURE'] * -1\n \n scalers = {}\n ic = {\"scalers\": scalers}\n \n complete_dataset, ylabels, Yscaler, h24scaler = import_merge_and_scale(Config, verbose=False)\n ic[\"complete_dataset\"] = complete_dataset\n ic[\"ylabels\"] = ylabels\n scalers['Yscaler'] = Yscaler\n scalers[\"h24scaler\"] = h24scaler\n \n y_len = len(ic[\"ylabels\"])\n ic[\"y_len\"] = y_len\n \n data, features = select_features(ic, Config)\n ic[\"data\"] = data\n ic[\"features\"] = features\n \n ic[\"f_len\"] = len(ic[\"features\"])\n \n ic[\"secuencias\"] = obtener_secuencias(ic)\n \n \n dtrainX = {}\n dtrainY = {}\n dvalidX = {}\n dvalidY = {}\n dtestX = {}\n dtestY = {}\n ddateTrain = {}\n ddateValid = {}\n ddateTest = {} \n for LAGS in [1, 2, 3, 5, 7, 14, 21, 28]:\n #for LAGS in [1, 2, 3, 4, 5]:\n \n Config[\"TIMESTEP\"] = LAGS\n examples, y_examples, dateExamples = make_examples(ic, Config, verbose=False)\n ic[\"examples\"] = examples\n ic[\"y_examples\"] = y_examples\n ic[\"dateExamples\"] = dateExamples\n \n d = make_folds_TVT(ic, Config)\n dtrainX[LAGS] = d[\"list_trainX\"]\n dtrainY[LAGS] = d[\"list_trainY\"]\n dvalidX[LAGS] = d[\"list_validX\"]\n dvalidY[LAGS] = d[\"list_validY\"]\n dtestX[LAGS] = d[\"list_testX\"]\n dtestY[LAGS] = d[\"list_testY\"]\n ddateTrain[LAGS] = d[\"list_dateTrain\"]\n ddateValid[LAGS] = d[\"list_dateValid\"]\n ddateTest[LAGS] = d[\"list_dateTest\"]\n \n all_data = {\n \"trainX\":dtrainX,\n \"trainY\":dtrainY,\n \"validX\":dvalidX,\n \"validY\":dvalidY,\n \"testX\" :dtestX,\n \"testY\" :dtestY,\n \"dateTrain\": ddateTrain,\n \"dateValid\": ddateValid,\n \"dateTest\": ddateTest,\n }\n \n filename = 'dictDATA%s.pkl'%(Config[\"IMPUTATION\"])\n output = open(filename, 'wb')\n pickle.dump(all_data, output)\n output.close()\n \n return all_data, scalers, Config\n\n\n# In[ ]:\n\n\na = data()\n\n\n# In[ ]:\n\n\n#STOP\n\n\n# In[ ]:\n\n\ndef HyperData():\n pkl_file = open('dictDATANone.pkl', 'rb')\n all_data = pickle.load(pkl_file)\n pkl_file.close()\n dtrainX=all_data[\"trainX\"]\n dtrainY=all_data[\"trainY\"]\n dvalidX=all_data[\"validX\"]\n dvalidY=all_data[\"validY\"]\n \n return dtrainX, dtrainY, dvalidX, dvalidY\n\n\n# In[ ]:\n\n\n#HyperData()\n\n\n# ## models\n\n# ### HyperLSTM\n\n# In[ ]:\n\n\ndef get_HyperLSTM_Config():\n Config = {\n #myLSTM()\n \"BATCH_SIZE\" : 16,\n \"EPOCHS\" : 400,\n \"PATIENCE\" : 20,\n }\n return Config\n\n\n# In[ ]:\n\n\ndef HyperLSTM(dtrainX, dtrainY, dvalidX, dvalidY):\n Config = {\n # import_merge_and_scale()\n \"BATCH_SIZE\" : 16,\n \n #myLSTM()\n \"EPOCHS\" : 400,\n \"PATIENCE\" : 20,\n }\n \n \n Config = get_HyperLSTM_Config()\n \n #print(f_len)\n BATCH_SIZE = Config[\"BATCH_SIZE\"]\n PATIENCE = Config[\"PATIENCE\"]\n EPOCHS = Config[\"EPOCHS\"]\n \n LAGS = {{choice([1, 2, 3, 5, 7, 14, 21, 28])}}\n #LAGS = {#{choice([1, 2, 3, 4, 5])}}\n TIMESTEP = LAGS\n print(\"LAGS,\", LAGS)\n \n list_trainX = dtrainX[LAGS]\n list_trainY = dtrainY[LAGS]\n list_validX = dvalidX[LAGS]\n list_validY = dvalidY[LAGS]\n \n #print(\"SPACE\")\n #for k in space:\n # print(k,space[k])\n \n \n \n losses = []\n list_models = []\n for fold in range(0,len(list_trainX)):\n print(\"Using Fold %s/%s:\"%(fold,len(list_trainX)-1))\n trainX = list_trainX[fold]\n trainY = list_trainY[fold]\n validX = list_validX[fold]\n validY = list_validY[fold]\n \n \n f_len = trainX.shape[-1]\n print(\"trainX.shape,\",trainX.shape)\n print(\"trainY.shape,\",trainY.shape)\n print(\"validX.shape,\",validX.shape)\n 
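data() writes the per-LAGS folds with a manual open/dump/close triple and HyperData() reads them back the same way. The equivalent with context managers (same illustrative file name) closes the handle even if dump or load raises:

```python
import pickle

def save_folds_sketch(all_data, filename="dictDATANone.pkl"):
    # handle is closed automatically, including on error
    with open(filename, "wb") as output:
        pickle.dump(all_data, output)

def load_folds_sketch(filename="dictDATANone.pkl"):
    with open(filename, "rb") as pkl_file:
        return pickle.load(pkl_file)
```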
print(\"validY.shape,\",validY.shape)\n \n \n es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=PATIENCE, restore_best_weights=True)\n model = Sequential()\n layers={{choice([\"one\", \"two\"])}}\n print(layers)\n if layers == 'two':\n model.add(LSTM(round({{uniform(0,1)}}*10000)%(LAGS*2)+1, activation=\"sigmoid\", input_shape=(TIMESTEP, f_len), return_sequences=True))\n model.add(Dropout(rate={{uniform(0, 1)}}))\n \n model.add(LSTM(round({{uniform(0,1)}}*10000)%(LAGS*2)+1, activation=\"sigmoid\", input_shape=(TIMESTEP, f_len), return_sequences=False))\n model.add(Dropout(rate={{uniform(0, 1)}}))\n model.add(Dense( 1, activation='linear'))\n model.compile(loss='mean_squared_error', optimizer='adam' )\n model.fit( trainX, trainY, epochs=EPOCHS, validation_data=(validX, validY), batch_size=BATCH_SIZE, callbacks=[es], verbose=2)\n loss = model.evaluate(validX, validY, verbose=1)\n print(\"fold: \",fold, \" loss:\",loss)\n losses.append(loss)\n list_models.append(model)\n print(\"Hora de termino: \",str(datetime.now()))\n \n meanloss = sum(losses)/len(losses)\n print('Valid Loss:', meanloss)\n return {'loss': meanloss, 'status': STATUS_OK, 'model': list_models}\n\n\n# ### HyperQuantile\n\n# In[ ]:\n\n\ndef get_HyperQuantile_Config():\n Config = {\n #myLSTM()\n \"BATCH_SIZE\" : 16,\n \"EPOCHS\" : 400,\n \"PATIENCE\" : 20,\n }\n return Config\n\n\n# In[ ]:\n\n\ndef HyperQuantile(dtrainX, dtrainY, dvalidX, dvalidY):\n Config = get_HyperQuantile_Config()\n BATCH_SIZE = Config[\"BATCH_SIZE\"]\n EPOCHS = Config[\"EPOCHS\"]\n PATIENCE = Config[\"PATIENCE\"]\n \n LAGS = {{choice([1, 2, 3, 5, 7, 14, 21, 28])}}\n #LAGS = {#{choice([1, 2, 3, 4, 5])}}\n TIMESTEP = LAGS\n print(\"LAGS,\", LAGS)\n \n list_trainX = dtrainX[LAGS]\n list_trainY = dtrainY[LAGS]\n list_validX = dvalidX[LAGS]\n list_validY = dvalidY[LAGS]\n \n #print(\"SPACE\")\n #for k in space:\n # print(k,space[k])\n QUANTILES = {{choice([\n [0.1, 0.9],\n [0.3, 0.7],\n [0.1, 0.3, 0.7, 0.9],\n [0.2, 0.4, 0.6, 0.8],\n ])}}\n qlen = len(QUANTILES)\n \n losses = []\n list_models = []\n for fold in range(0,len(list_trainX)):\n print(\"Using Fold %s-%s:\"%(fold,len(list_trainX)-1))\n trainX = list_trainX[fold]\n trainY = list_trainY[fold]\n validX = list_validX[fold]\n validY = list_validY[fold]\n \n \n f_len = trainX.shape[-1]\n print(\"trainX.shape,\",trainX.shape)\n print(\"trainY.shape,\",trainY.shape)\n print(\"validX.shape,\",validX.shape)\n print(\"validY.shape,\",validY.shape)\n \n \n es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=PATIENCE, restore_best_weights=True)\n qModel = Sequential()\n layers={{choice([\"one\", \"two\"])}}\n print(layers)\n if layers == 'two':\n qModel.add(LSTM(round({{uniform(0,1)}}*10000)%(LAGS*2)+1, activation=\"sigmoid\", input_shape=(TIMESTEP, f_len), return_sequences=True))\n qModel.add(Dropout(rate={{uniform(0, 1)}}))\n \n qModel.add(LSTM(round({{uniform(0,1)}}*10000)%(LAGS*2)+1, activation=\"sigmoid\", input_shape=(TIMESTEP, f_len), return_sequences=False))\n qModel.add(Dropout(rate={{uniform(0, 1)}}))\n qModel.add(Dense( 1 + len(QUANTILES), activation=\"linear\" ))\n \n qModel.compile(loss=lambda y,f: meanquantil_loss2(QUANTILES, 1,qlen,y,f), optimizer='adam')\n \n qModel.fit(trainX, trainY, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_data=(validX,validY), callbacks=[es], verbose=2)\n \n loss = qModel.evaluate(validX, validY, verbose=2)\n print(\"fold: \",fold, \" loss:\",loss)\n losses.append(loss)\n list_models.append(qModel)\n print(\"Hora de termino: 
\",str(datetime.now()))\n \n meanloss = sum(losses)/len(losses)\n print('Valid Loss:', meanloss)\n return {'loss': meanloss, 'status': STATUS_OK, 'model': list_models}\n\n\n# ### HyperPreSQP\n\n# In[ ]:\n\n\ndef get_preSQP_Config():\n Config = {\n #myLSTM()\n \"BATCH_SIZE\" : 16,\n \"EPOCHS\" : 400,\n \"PATIENCE\" : 20,\n \"QUANTILES\" : [ 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7 ,0.8 ,0.9],\n #\"QUANTILES\" : [ 0.75],\n }\n return Config\n\n\n# In[ ]:\n\n\ndef HyperPreSQP(dtrainX, dtrainY, dvalidX, dvalidY):\n Config = get_preSQP_Config()\n BATCH_SIZE = Config[\"BATCH_SIZE\"]\n EPOCHS = Config[\"EPOCHS\"]\n PATIENCE = Config[\"PATIENCE\"]\n QUANTILES = Config[\"QUANTILES\"]\n \n LAGS = {{choice([1, 2, 3, 5, 7, 14, 21, 28])}}\n #LAGS = {#{choice([1, 2, 3, 4, 5])}}\n TIMESTEP = LAGS\n print(\"LAGS,\", LAGS)\n \n list_trainX = dtrainX[LAGS]\n list_trainY = dtrainY[LAGS]\n list_validX = dvalidX[LAGS]\n list_validY = dvalidY[LAGS]\n \n #print(\"SPACE\")\n #for k in space:\n # print(k,space[k])\n\n qlen = len(QUANTILES)\n \n losses = []\n list_models = []\n for fold in range(0,len(list_trainX)):\n print(\"Using Fold %s-%s:\"%(fold,len(list_trainX)-1))\n trainX = list_trainX[fold]\n trainY = list_trainY[fold]\n validX = list_validX[fold]\n validY = list_validY[fold]\n \n \n f_len = trainX.shape[-1]\n print(\"trainX.shape,\",trainX.shape)\n print(\"trainY.shape,\",trainY.shape)\n print(\"validX.shape,\",validX.shape)\n print(\"validY.shape,\",validY.shape)\n \n \n es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=PATIENCE, restore_best_weights=True)\n qModel = Sequential()\n layers={{choice([\"one\", \"two\"])}}\n print(layers)\n if layers == 'two':\n qModel.add(LSTM(round({{uniform(0,1)}}*10000)%(LAGS*2)+1, activation=\"sigmoid\", input_shape=(TIMESTEP, f_len), return_sequences=True))\n qModel.add(Dropout(rate={{uniform(0, 1)}}))\n \n qModel.add(LSTM(round({{uniform(0,1)}}*10000)%(LAGS*2)+1, activation=\"sigmoid\", input_shape=(TIMESTEP, f_len), return_sequences=False))\n qModel.add(Dropout(rate={{uniform(0, 1)}}))\n qModel.add(Dense( 1 + len(QUANTILES), activation=\"linear\" ))\n \n qModel.compile(loss=lambda y,f: quantil_loss(QUANTILES, 1,qlen,y,f), optimizer='adam')\n \n qModel.fit(trainX, trainY, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_data=(validX,validY), callbacks=[es], verbose=2)\n \n loss = qModel.evaluate(validX, validY, verbose=2)\n print(\"fold: \",fold, \" loss:\",loss)\n losses.append(loss)\n list_models.append(qModel)\n print(\"Hora de termino: \",str(datetime.now()))\n \n meanloss = sum(losses)/len(losses)\n print('Valid Loss:', meanloss)\n return {'loss': meanloss, 'status': STATUS_OK, 'model': list_models}\n\n\n# ## runs\n\n# In[ ]:\n\n\nif \"-f\" not in sys.argv:\n MODEL_NAME = sys.argv[1]\n MAX_EVALS = int(sys.argv[2])\n print(\"MODEL_NAME:\", MODEL_NAME)\n print(\"MAX_EVALS:\", MAX_EVALS)\nelse:\n MODEL_NAME = \"HyperLSTM\"\n MAX_EVALS = 1\n\n\nif MODEL_NAME == \"HyperLSTM\":\n HyperModel = HyperLSTM\n get_Config = get_HyperLSTM_Config\n FUNCTIONS = [get_HyperLSTM_Config]\n\nelif MODEL_NAME == \"HyperQ\":\n HyperModel = HyperQuantile\n get_Config = get_HyperQuantile_Config\n FUNCTIONS = [get_HyperQuantile_Config, meanquantil_loss2]\n\nelif MODEL_NAME == \"preSQP\":\n HyperModel = HyperPreSQP\n get_Config = get_preSQP_Config\n FUNCTIONS = [get_preSQP_Config, quantil_loss]\n\n\n# In[ ]:\n\n\n#STOP\n\n\n# In[ ]:\n\n\nbest_run, best_model = optim.minimize(model= HyperModel,\n data= HyperData,\n algo=tpe.suggest,\n functions = FUNCTIONS,\n 
max_evals=MAX_EVALS,\n trials=Trials(),\n notebook_name= \"Tisis\" if \"-f\" in sys.argv else None\n )\n\n\n# In[ ]:\n\n\nprint(best_run)\n\n\n# In[ ]:\n\n\nif MODEL_NAME == \"HyperLSTM\":\n LAGS = [1, 2, 3, 5, 7, 14, 21, 28][best_run['LAGS']]\n #LAGS = [1, 2, 3, 4, 5][best_run['LAGS']]\n layers = ['one', 'two'][best_run['layers']]\n lstm1 = round(best_run['round']*10000)%(LAGS*2)+1\n dropout1 = best_run['rate']\n lstm2 = round(best_run['round_1']*10000)%(LAGS*2)+1\n dropout2 = best_run['rate_1']\n \n print(\"Configuracion:\")\n print(\" LAGS:\",LAGS)\n print(\" layers:\",layers)\n print(\" lstm1:\",lstm1)\n print(\" dropout1:\",dropout1)\n print(\" lstm2:\",lstm2)\n print(\" dropout2:\",dropout2)\n print(\"\")\n \n Config = a[2] #Config de data()\n scalers = a[1]\n modelConfig = get_Config()\n Config[\"TIMEDIST\"] = LAGS\n Config[\"BATCH_SIZE\"] = modelConfig[\"BATCH_SIZE\"]\n ic = {}\n ic[\"scalers\"] = scalers\n ic[\"list_models\"] = best_model\n for k in a[0]:\n ic[\"list_\"+k] = a[0][k][LAGS]\n #ic[\"list_trainX\"] = a[0][\"trainX\"][LAGS]\n #ic[\"list_trainY\"] = a[0][\"trainY\"][LAGS]\n #ic[\"list_validX\"] = a[0][\"validX\"][LAGS]\n #ic[\"list_validY\"] = a[0][\"validY\"][LAGS]\n #ic[\"list_testX\"] = a[0][\"testX\"][LAGS]\n #ic[\"list_testY\"] = a[0][\"testY\"][LAGS]\n #ic[\"list_dateTrain\"] = a[0][\"dateTrain\"][LAGS]\n #ic[\"list_dateValid\"] = a[0][\"dateValid\"][LAGS]\n #ic[\"list_dateTest\"] = a[0][\"dateTest\"][LAGS]\n myLSTMPredict(ic,Config)\n\n\n# In[ ]:\n\n\nif MODEL_NAME == \"HyperQ\":\n LAGS = [1, 2, 3, 5, 7, 14, 21, 28][best_run['LAGS']]\n #LAGS = [1, 2, 3, 4, 5][best_run['LAGS']]\n layers = ['one', 'two'][best_run['layers']]\n lstm1 = round(best_run['round']*10000)%(LAGS*2)+1\n dropout1 = best_run['rate']\n lstm2 = round(best_run['round_1']*10000)%(LAGS*2)+1\n dropout2 = best_run['rate_1']\n QUANTILES = [\n [0.1, 0.9],\n [0.3, 0.7],\n [0.1, 0.3, 0.7, 0.9],\n [0.2, 0.4, 0.6, 0.8],\n ][best_run['QUANTILES']]\n \n print(\"Configuracion:\")\n print(\" LAGS:\",LAGS)\n print(\" QUANTILES:\",QUANTILES)\n print(\" layers:\",layers)\n print(\" lstm1:\",lstm1)\n print(\" dropout1:\",dropout1)\n print(\" lstm2:\",lstm2)\n print(\" dropout2:\",dropout2)\n print(\"\")\n \n Config = a[2] #Config de data()\n scalers = a[1]\n modelConfig = get_Config()\n Config[\"TIMEDIST\"] = LAGS\n Config[\"QUANTILES\"] = QUANTILES\n Config[\"BATCH_SIZE\"] = modelConfig[\"BATCH_SIZE\"]\n ic = {}\n ic[\"scalers\"] = scalers\n ic[\"list_models\"] = best_model\n for k in a[0]:\n ic[\"list_\"+k] = a[0][k][LAGS]\n #ic[\"list_trainX\"] = a[0][\"trainX\"][LAGS]\n #ic[\"list_trainY\"] = a[0][\"trainY\"][LAGS]\n #ic[\"list_validX\"] = a[0][\"validX\"][LAGS]\n #ic[\"list_validY\"] = a[0][\"validY\"][LAGS]\n #ic[\"list_testX\"] = a[0][\"testX\"][LAGS]\n #ic[\"list_testY\"] = a[0][\"testY\"][LAGS]\n #ic[\"list_dateTrain\"] = a[0][\"dateTrain\"][LAGS]\n #ic[\"list_dateValid\"] = a[0][\"dateValid\"][LAGS]\n #ic[\"list_dateTest\"] = a[0][\"dateTest\"][LAGS]\n \n \n Qprediction(ic, Config)\n print(\"mean_loss2, meanerror + meanLq\")\n print('''QUANTILES = {{choice([\n [0.1, 0.9],\n [0.3, 0.7],\n [0.1, 0.3, 0.7, 0.9],\n [0.2, 0.4, 0.6, 0.8],\n ])}}''')\n\n\n# In[ ]:\n\n\nif MODEL_NAME == \"preSQP\":\n LAGS = [1, 2, 3, 5, 7, 14, 21, 28][best_run['LAGS']]\n #LAGS = [1, 2, 3, 4, 5][best_run['LAGS']]\n layers = ['one', 'two'][best_run['layers']]\n lstm1 = round(best_run['round']*10000)%(LAGS*2)+1\n dropout1 = best_run['rate']\n lstm2 = round(best_run['round_1']*10000)%(LAGS*2)+1\n dropout2 = best_run['rate_1']\n 
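optim.minimize returns `best_run` with an *index* for every choice and a raw float for every uniform, which is why each branch below re-applies the same lookup tables. A small helper makes that decode explicit; the key names follow Hyperas' automatic naming as used in this script and are otherwise assumptions:

```python
def decode_best_run_sketch(best_run, lags_options=(1, 2, 3, 5, 7, 14, 21, 28)):
    lags = lags_options[best_run["LAGS"]]           # choice -> index into options
    return {
        "LAGS": lags,
        "layers": ["one", "two"][best_run["layers"]],
        "lstm1": round(best_run["round"] * 10000) % (lags * 2) + 1,
        "dropout1": best_run["rate"],
        "lstm2": round(best_run["round_1"] * 10000) % (lags * 2) + 1,
        "dropout2": best_run["rate_1"],
    }
```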
modelConfig = get_Config()\n QUANTILES = modelConfig[\"QUANTILES\"]\n \n print(\"Configuracion:\")\n print(\" LAGS:\",LAGS)\n print(\" QUANTILES:\",QUANTILES)\n print(\" layers:\",layers)\n print(\" lstm1:\",lstm1)\n print(\" dropout1:\",dropout1)\n print(\" lstm2:\",lstm2)\n print(\" dropout2:\",dropout2)\n print(\"\")\n \n Config = a[2] #Config de data()\n scalers = a[1]\n \n Config[\"TIMEDIST\"] = LAGS\n Config[\"QUANTILES\"] = QUANTILES\n Config[\"BATCH_SIZE\"] = modelConfig[\"BATCH_SIZE\"]\n ic = {}\n ic[\"scalers\"] = scalers\n ic[\"list_models\"] = best_model\n for k in a[0]:\n ic[\"list_\"+k] = a[0][k][LAGS]\n \n Qprediction(ic, Config)\n print(\"mean_loss2, meanerror + meanLq\")\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"work/tuning.done/POH/POH_full_2019-07-21_LSTM_none_35try.py","file_name":"POH_full_2019-07-21_LSTM_none_35try.py","file_ext":"py","file_size_in_byte":178630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"128141064","text":"import time\nimport urllib.request\n\nimport html2text\n\nMAX_SIZE_FOR_TRAINING_SET = 40\n\nSEPERATOR_TOLORANCE = 1\n\nMAX_WORDS_FOR_LINE = 100\n\nMAX_WORDS_FOR_PARA = 120\n\nI = 4\n\n\ndef calibrate(result_paragraph):\n fixed_paragrapes = []\n for i in range(len(result_paragraph)):\n para = result_paragraph[i]\n lines_in_para = para.split('\\n')\n main_para = ''\n for line in lines_in_para:\n if len(line.split(' ')) > MAX_WORDS_FOR_LINE:\n fixed_paragrapes.append(line)\n elif line != '':\n main_para += line+'\\n'\n fixed_paragrapes.append(main_para)\n\n return fixed_paragrapes\n\n\n\ndef get_text_from_url(url, retries = 5):\n if retries == 0:\n raise Exception(\"could not handle request\")\n try:\n user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'\n headers = {'User-Agent': user_agent, }\n request = urllib.request.Request(url, None, headers)\n\n html = urllib.request.urlopen(request, timeout=10).read().decode('utf-8')\n\n h = html2text.HTML2Text()\n h.ignore_links = True\n allText = h.handle(html)\n\n paragraphs = allText.split('\\n\\n')\n\n\n result_paragraph = []\n next_para_to_add = ''\n separator_tolerance = SEPERATOR_TOLORANCE\n for text in paragraphs:\n\n if len(text.split(' ')) > MAX_WORDS_FOR_PARA:\n text = text.split(' ')\n n = MAX_WORDS_FOR_PARA\n chunked = [' '.join(text[i:i + n]) for i in range(0, len(text), n)]\n result_paragraph.extend(chunked)\n continue\n\n lines = (line.strip() for line in text.splitlines())\n # break multi-headlines into a line each\n chunks = (phrase.strip() for line in lines for phrase in line.split(\" \"))\n # drop blank lines\n text_result = '\\n'.join(chunk for chunk in chunks if chunk)\n if text_result != '':\n next_para_to_add += '\\n' + text_result\n else:\n separator_tolerance = separator_tolerance - 1\n\n if text_result == '' and separator_tolerance == 0:\n insert_text(next_para_to_add, result_paragraph)\n next_para_to_add = ''\n separator_tolerance = SEPERATOR_TOLORANCE\n\n return calibrate(result_paragraph)\n except Exception as exc:\n if retries > 0:\n print(\"an exception occurred while trying to access url {} trying again \\n more details: {}\"\n .format(url, exc))\n time.sleep(1)\n retries = retries - 1\n return get_text_from_url(url, retries)\n else:\n raise\n\n\ndef get_all_text_from_url(url, retries = 5):\n if retries == 0:\n raise Exception(\"could not handle request\")\n try:\n user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_4) AppleWebKit/537.36 (KHTML, like Gecko) 
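The scraper that follows converts fetched HTML to plain text with html2text before chunking it into paragraphs. The core conversion in isolation, with the same switches the module sets:

```python
import html2text

html = "<h1>Title</h1><p>First paragraph.</p><p>Second one.</p>"
h = html2text.HTML2Text()
h.ignore_links = True      # drop hyperlink markup
h.ignore_images = True     # drop image references
text = h.handle(html)
paragraphs = [p for p in text.split("\n\n") if p.strip()]
print(paragraphs)
```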
Chrome/91.0.4472.124 Safari/537.36'\n        headers = {'User-Agent': user_agent}\n        request = urllib.request.Request(url, None, headers)\n\n        html = urllib.request.urlopen(request).read().decode('utf-8')\n\n        h = html2text.HTML2Text()\n        h.ignore_links = True\n        h.ignore_images = True\n\n        allText = h.handle(html)\n\n        return allText\n    except Exception as exc:\n        if retries > 0:\n            print(\"an exception occurred while trying to access url {} trying again \\n more details: {}\"\n                  .format(url, exc))\n            time.sleep(1)\n            retries = retries - 1\n            return get_all_text_from_url(url, retries)\n        else:\n            raise\n\n\n\ndef insert_text(next_para_to_add, result_paragraph):\n    if next_para_to_add != '':\n        if len(next_para_to_add.split(' ')) > MAX_SIZE_FOR_TRAINING_SET:\n            index_with_newline = next_para_to_add.find('\\n')\n            insert_text(next_para_to_add[0:index_with_newline], result_paragraph)\n            insert_text(next_para_to_add[index_with_newline + 1:], result_paragraph)\n\n\n        result_paragraph.append(next_para_to_add)","sub_path":"utils/textExtractor.py","file_name":"textExtractor.py","file_ext":"py","file_size_in_byte":4128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"336558769","text":"import pandas as pd\nimport numpy as np\nfrom settings import Run_Val\nfrom utils import Log\n\n\ndef replace_val_from_df_dict(data: dict, replace_dict: dict) -> dict:\n    \"\"\"\n    Replace, in every data table of the dataset dict, the values listed in the given replacement dict\n\n    :param data: dataset dict to process\n    :param replace_dict: replacement dict\n    :return: dataset dict after replacement\n    \"\"\"\n    for x in data:\n        data[x] = pd.DataFrame(data[x]).replace(replace_dict)\n    return data\n\n\ndef replace_types_in_df_dict(data: dict, replace_dict: dict) -> dict:\n    \"\"\"\n    Replace, in every data table of the dataset dict, the column data types listed in the given replacement dict\n\n    :param data: dataset dict to process\n    :param replace_dict: replacement dict\n    :return: dataset dict after replacement\n    \"\"\"\n    for index, index_type in replace_dict.items():\n        for data_name in data:\n            if index in list(data[data_name].columns):\n                data[data_name][index] = data[data_name][index].fillna(0).astype(index_type)\n    return data\n\n\ndef set_index_in_df_dict(data: dict, index_col_name: str) -> dict:\n    \"\"\"\n    Set the column with the given name as the index of every data table in the dataset dict\n\n    :param data: dataset dict to process\n    :param index_col_name: name of the index column\n    :return: dict with the index column set\n    \"\"\"\n    for df_name in data:\n        data[df_name].set_index(index_col_name, inplace=True)\n    return data\n\n\ndef mix_dataset(data: dict, d_name: set = None) -> dict:\n    \"\"\"\n    Merge the tables of a dataset dict, given the dict and the set of dataset names\n\n    :param data: dataset dict\n    :param d_name: set of dataset names; there are two cases:\n        1. when all tables live in one directory and were read in with dataset names like X_A, X_B, pass {A, B}.\n        2. when the tables are stored in per-dataset folders, this argument is not needed\n    :return: the merged dataset dict\n    \"\"\"\n    need_mix = {}\n    if d_name is None:\n        d_name = Run_Val.dataset_names\n    for k in data:\n        for x in d_name:\n            if str(k).endswith(x):\n                tmp_name = str(k).replace('_' + x, '')\n                if tmp_name in need_mix:\n                    need_mix[tmp_name].add(k)\n                else:\n                    need_mix[tmp_name] = {k}\n    for k, v in need_mix.items():\n        if len(v) > 1:\n            Log.info('merging tables [{}] into [{}]'.format(','.join(v), k))\n            data[k] = pd.concat([data.get(x) for x in v], sort=True, ignore_index=True)\n            for x in v:\n                del data[x]\n    return data\n\n\ndef convert_types(df: pd.DataFrame) -> pd.DataFrame:\n    \"\"\"\n    Convert a data table to data types with a lower memory footprint\n\n    :param df: data table before conversion\n    :return: data table after conversion\n    \"\"\"\n    for c in df:\n        # convert objects to category\n        if (df[c].dtype == 'object') and (df[c].nunique() < df.shape[0]):\n            df[c] = df[c].astype('category')\n        # convert {0, 1} integer columns to booleans\n        elif set(df[c].unique()) == {0, 1}:\n            df[c] = df[c].astype(bool)\n        # convert float64 to float32\n        elif df[c].dtype == 'float64':\n            df[c] = df[c].astype(np.float32)\n        # convert int64 to int32\n        elif df[c].dtype == 'int64':\n            df[c] = df[c].astype(np.int32)\n    return df\n\n\ndef zip_dataset(df_dict: dict) -> dict:\n    \"\"\"\n    Compress a dict of data tables with a generic method (conversion to lower-memory data types)\n\n    :param df_dict: dict of data tables to compress\n    :return: compressed dict of data tables\n    \"\"\"\n    Log.memory_used()\n    for df_name in df_dict:\n        Log.debug('compressing table {}'.format(df_name))\n        df_dict[df_name] = convert_types(df_dict[df_name])\n    Log.memory_used()\n    return df_dict\n","sub_path":"prepares/DealDataFrame.py","file_name":"DealDataFrame.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"394541345","text":"\ndef firstmodify(left, right, up, bottom, margin_perc=20):\n    \"\"\"\n    This function makes a square image based on the face landmarks + margin.\n\n    Args:\n        left(int): the left border of the image.\n        right(int): the right border of the image.\n        up(int): the top border of the image.\n        bottom(int): the bottom border of the image.\n        margin_perc(int): the margin as a percentage; change it to control the margin width\n\n    Returns:\n        tuple(int): the new borders of the image.\n\n    \"\"\"\n    if (right-left)>=(bottom-up):\n        margin = int((right-left)*margin_perc/100)\n        diff = (right-left)-(bottom-up)\n        if diff%2 == 0:\n            left = int(left-margin)\n            right = int(right+margin)\n            up = int(up-margin-diff)\n            bottom = int(bottom+margin)\n\n        else:\n            left = int(left-margin)\n            right = int(right+margin)\n            up = int(up-margin-diff)\n            bottom = int(bottom+margin)\n    else:\n        margin = int((bottom-up)*margin_perc/100)\n        diff = (bottom-up)-(right-left)\n        if diff%2 == 0:\n            left = int(left-margin-diff/2)\n            right = int(right+margin+diff/2)\n            up = int(up-margin)\n            bottom = int(bottom+margin)\n        else:\n            left = int(left-margin-(diff/2+0.5))\n            right = int(right+margin+(diff/2-0.5))\n            up = int(up-margin)\n            bottom = int(bottom+margin)\n\n    return left, right, up, bottom\n\n\n\ndef ifoverborder(left, right, up, bottom, width, height):\n    if left < 0:\n        right = right + (0-left)\n        left = 0\n        if right > width:\n            right = width\n    if right > width:\n        left = left - (right-width)\n        right = width\n        if left < 0:\n            left = 0\n    if up < 0:\n        bottom = bottom + (0-up)\n        up = 0\n        if bottom > height:\n            bottom = height\n    if bottom > height:\n        up = up - (bottom - height)\n        bottom = height\n        if up < 0:\n            up = 0\n    #print(left, right, up, bottom, width, height)\n    return left, right, up, bottom\n\ndef finalmodify(left, right, up, bottom):\n    #print(left, right, up, bottom)\n    if right - left < bottom - up:\n        diff = (bottom-up)-(right-left)\n        if 
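convert_types above downcasts column dtypes to cut memory. A quick self-contained check of the effect on a toy frame (the columns and numbers are illustrative):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({
    "flag": np.random.randint(0, 2, 10000),          # {0, 1} -> bool
    "price": np.random.rand(10000),                  # float64 -> float32
    "city": np.random.choice(["a", "b"], 10000),     # object -> category
})
before = df.memory_usage(deep=True).sum()
df["flag"] = df["flag"].astype(bool)
df["price"] = df["price"].astype(np.float32)
df["city"] = df["city"].astype("category")
after = df.memory_usage(deep=True).sum()
print(before, "->", after)   # the bool/float32/category frame is far smaller
```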
diff%2 == 0:\n up = int(up+diff/2)\n bottom = int(bottom-diff/2)\n else:\n up = int(up+diff/2-0.5)\n bottom = int(bottom-diff/2-0.5)\n else:\n diff = (right-left)-(bottom-up)\n if diff%2 == 0:\n left = int(left+diff/2)\n right = int(right-diff/2)\n else:\n left = int(left+diff/2+0.5)\n right = int(right-diff/2+0.5)\n #print(left, right, up, bottom)\n return left, right, up, bottom\n\n","sub_path":"landmarks.py","file_name":"landmarks.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"344509328","text":"from django.urls import re_path\nfrom django.contrib.auth.views import LogoutView, LoginView\nfrom .views import user_home, CreateListView, create_list_item, list_details, delete_list, delete_list_item, \\\n delete_list_items, update_list_item, update_list\n\nurlpatterns = [\n re_path('^user_home$', user_home, name='todo_user_home'),\n re_path('^logout$', LogoutView.as_view(), name='todo_logout'),\n re_path('^login$', LoginView.as_view(template_name='todo/user_login.html'), name='todo_login'),\n re_path('^create_list$', CreateListView.as_view(), name='todo_create_list'),\n re_path('^create_list_item/(?P\\d+)/$', create_list_item, name='todo_create_list_item'),\n re_path('^list_details/(?P\\d+)/$', list_details, name='todo_list_details'),\n re_path('^delete_list/(?P\\d+)/$', delete_list, name='todo_delete_list'),\n re_path('^delete_list_item/(?P\\d+)/$', delete_list_item, name='todo_delete_list_item'),\n re_path('^delete_list_items/(?P\\d+)/$', delete_list_items, name='todo_delete_list_items'),\n re_path('^update_list/(?P\\d+)/$', update_list, name='todo_update_list'),\n re_path('^update_list_item/(?P\\d+)/$', update_list_item, name='todo_update_list_item'),\n]\n","sub_path":"todowebapp/todo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"524620562","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Views to administer plugins.\"\"\"\nfrom typing import Optional\n\nimport django_tables2 as tables\nfrom django.contrib.auth.decorators import user_passes_test\nfrom django.db.models.expressions import F\nfrom django.http.request import HttpRequest\nfrom django.http.response import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom django.template.loader import render_to_string\nfrom django.urls.base import reverse\nfrom django.utils.html import format_html\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ontask.core.decorators import ajax_required\nfrom ontask.core.permissions import is_admin, is_instructor\nfrom ontask.dataops.plugin.plugin_manager import (\n load_plugin, refresh_plugin_data,\n)\nfrom ontask.models import Log, Plugin, Workflow\nfrom ontask.workflow.access import remove_workflow_from_session\n\n\nclass PluginAdminTable(tables.Table):\n \"\"\"Class to render the table with plugins present in the system.\"\"\"\n\n description_text = tables.TemplateColumn(\n verbose_name=_('Description'),\n template_name='dataops/includes/partial_plugin_description.html',\n )\n\n last_exec = tables.DateTimeColumn(verbose_name=_('Last executed'))\n\n filename = tables.Column(verbose_name=_('Folder'), empty_values=None)\n\n num_executions = tables.Column(\n verbose_name=_('Executions'),\n empty_values=[])\n\n def render_is_verified(self, record):\n \"\"\"Render is_verified as a tick or the button Diagnose.\"\"\"\n if record.is_verified:\n return 
format_html('')\n\n return render_to_string(\n 'dataops/includes/partial_plugin_diagnose.html',\n context={'id': record.id},\n request=None)\n\n def render_is_enabled(self, record):\n \"\"\"Render the is enabled as a checkbox.\"\"\"\n return render_to_string(\n 'dataops/includes/partial_plugin_enable.html',\n context={'record': record},\n request=None)\n\n def render_last_exec(self, record):\n \"\"\"Render the last executed time.\n\n :param record: Record being processed in the table.\n\n :return:\n \"\"\"\n log_item = Log.objects.filter(\n name=Log.PLUGIN_EXECUTE,\n payload__name=record.name,\n ).order_by(F('created').desc()).first()\n if not log_item:\n return '—'\n return log_item.created\n\n def render_num_executions(self, record):\n \"\"\"Render the last executed time.\n\n :param record: Record being processed in the table.\n\n :return:\n \"\"\"\n return Log.objects.filter(\n name=Log.PLUGIN_EXECUTE,\n payload__name=record.name,\n ).count()\n\n class Meta(object):\n \"\"\"Choose fields, sequence and attributes.\"\"\"\n\n model = Plugin\n\n fields = (\n 'filename',\n 'name',\n 'description_text',\n 'is_model',\n 'is_verified',\n 'is_enabled')\n\n sequence = (\n 'filename',\n 'name',\n 'description_text',\n 'is_model',\n 'is_verified',\n 'is_enabled',\n 'num_executions',\n 'last_exec')\n\n attrs = {\n 'class': 'table table-hover table-bordered shadow',\n 'style': 'width: 100%;',\n 'id': 'plugin-admin-table',\n 'th': {'class': 'dt-body-center'},\n 'td': {'style': 'vertical-align: middle'}}\n\n\n@user_passes_test(is_admin)\ndef plugin_admin(\n request: HttpRequest,\n) -> HttpResponse:\n \"\"\"Show the table of plugins and their status.\n\n :param request: HTTP Request\n\n :return:\n \"\"\"\n remove_workflow_from_session(request)\n\n # Traverse the plugin folder and refresh the db content.\n refresh_plugin_data(request)\n\n return render(\n request,\n 'dataops/plugin_admin.html',\n {'table': PluginAdminTable(Plugin.objects.all())})\n\n\n@user_passes_test(is_instructor)\n@ajax_required\ndef diagnose(\n request: HttpRequest,\n pk: int,\n workflow: Optional[Workflow] = None,\n) -> JsonResponse:\n \"\"\"Show the diagnostics of a plugin that failed the verification tests.\n\n :param request: HTML request object\n\n :param pk: Primary key of the transform element\n\n :return:\n \"\"\"\n # Action being used\n plugin = Plugin.objects.filter(id=pk).first()\n if not plugin:\n return JsonResponse({'html_redirect': reverse('home')})\n\n # Reload the plugin to get the messages stored in the right place.\n pinstance, msgs = load_plugin(plugin.filename)\n\n # If the new instance is now properly verified, simply redirect to the\n # transform page\n if pinstance:\n plugin.is_verified = True\n plugin.save()\n return JsonResponse({'html_redirect': reverse('dataops:plugin_admin')})\n\n # Get the diagnostics from the plugin and use it for rendering.\n return JsonResponse({\n 'html_form': render_to_string(\n 'dataops/includes/partial_diagnostics.html',\n {'diagnostic_table': msgs},\n request=request),\n })\n\n\n@user_passes_test(is_instructor)\n@ajax_required\ndef moreinfo(\n request: HttpRequest,\n pk: int,\n) -> JsonResponse:\n \"\"\"Show the detailed information about a plugin.\n\n :param request: HTML request object\n\n :param pk: Primary key of the Plugin element\n\n :return:\n \"\"\"\n # Action being used\n plugin = Plugin.objects.filter(id=pk).first()\n if not plugin:\n return JsonResponse({'html_redirect': reverse('home')})\n\n # Reload the plugin to get the messages stored in the right place.\n 
pinstance, msgs = load_plugin(plugin.filename)\n\n    # Get the descriptions and show them in the modal\n    return JsonResponse({\n        'html_form': render_to_string(\n            'dataops/includes/partial_plugin_long_description.html',\n            {'pinstance': pinstance},\n            request=request),\n    })\n\n\n@user_passes_test(is_instructor)\n@ajax_required\ndef plugin_toggle(\n    request: HttpRequest,\n    pk: int,\n) -> JsonResponse:\n    \"\"\"Toggle the field is_enabled of a plugin.\n\n    :param request: HTML request object\n\n    :param pk: Primary key of the Plugin element\n\n    :return:\n    \"\"\"\n    plugin_item = Plugin.objects.get(pk=pk)\n    if plugin_item.is_verified:\n        plugin_item.is_enabled = not plugin_item.is_enabled\n        plugin_item.save()\n    return JsonResponse({'is_checked': plugin_item.is_enabled})\n","sub_path":"ontask/dataops/views/plugin_admin.py","file_name":"plugin_admin.py","file_ext":"py","file_size_in_byte":6471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"215510351","text":"import sys\n\nimport troposphere as Fn\nfrom troposphere import (\n    ec2, iam, Template, Parameter, Ref, elasticloadbalancing\n)\nfrom troposphere.autoscaling import LaunchConfiguration, AutoScalingGroup\nfrom troposphere.route53 import RecordSetType\nfrom troposphere.policies import AutoScalingRollingUpdate, UpdatePolicy\n\n\nCOMPONENT_NAME = \"sample-app-python\"\nHEALTH_CHECK_TARGET = \"HTTP:7080/status\"\n\nt = Template()\nt.add_version()\nt.add_description(\"Stack to run the sample python application within.\")\n\nimage_id = t.add_parameter(Parameter(\n    \"ImageId\",\n    Description=(\"The AMI used by this component, defaults to base centos 7\"),\n    Default=\"ami-9398d3e0\",\n    Type=\"AWS::EC2::Image::Id\"\n))\n\nmin_size = t.add_parameter(Parameter(\n    \"MinSize\",\n    Description=\"Minimum number of instances to spin-up\",\n    Type=\"String\",\n    Default=\"2\"\n))\n\nmax_size = t.add_parameter(Parameter(\n    \"MaxSize\",\n    Description=\"Maximum number of instances to spin-up\",\n    Type=\"String\",\n    Default=\"2\"\n))\n\ninstance_type = t.add_parameter(Parameter(\n    \"InstanceType\",\n    Description=\"EC2 instance type to be used\",\n    Type=\"String\",\n    Default=\"t2.nano\"\n))\n\nvpc_id = t.add_parameter(Parameter(\n    \"VpcId\",\n    Description=\"The Id of the VPC to attach the environment to\",\n    Type=\"AWS::EC2::VPC::Id\"\n))\n\nkey_pair_name = t.add_parameter(Parameter(\n    \"KeyName\",\n    Description=(\"Name of existing EC2 key-pair to enable \"\n                 \"SSH access to the created instances\"),\n    Type=\"AWS::EC2::KeyPair::KeyName\"\n))\n\nenvironment = t.add_parameter(Parameter(\n    \"Environment\",\n    Description=\"The name of the environment, e.g. test or live\",\n    Type=\"String\"\n))\n\nbastion_access_sg = t.add_parameter(Parameter(\n    \"BastionAccessSecurityGroup\",\n    Description=\"The security group allowing access from the bastions\",\n    Type=\"AWS::EC2::SecurityGroup::Id\"\n))\n\nupdate_max_batch_size = t.add_parameter(Parameter(\n    \"UpdateMaxBatchSize\",\n    Description=(\"The maximum number of instances to be killed \"\n                 \"at one time during an ASG update\"),\n    Default=\"1\",\n    Type=\"String\"\n))\n\nupdate_min_in_service = t.add_parameter(Parameter(\n    \"UpdateMinInService\",\n    Description=(\"The minimum number of instances to remain \"\n                 \"in service during an ASG update\"),\n    Default=\"0\",\n    Type=\"String\"\n))\n\nupdate_pause_time = t.add_parameter(Parameter(\n    \"UpdatePauseTime\",\n    Description=(\"The time to wait between new instances coming \"\n                 \"online and the next batch being killed during \"\n                 
\"an ASG update.\"),\n    Default=\"PT0S\",\n    Type=\"String\"\n))\n\nprivate_subnets = t.add_parameter(Parameter(\n    \"PrivateSubnets\",\n    Type=\"List<AWS::EC2::Subnet::Id>\",\n    Description=\"Comma separated list of subnets to position the ASG in\",\n))\n\npublic_subnets = t.add_parameter(Parameter(\n    \"PublicSubnets\",\n    Type=\"List<AWS::EC2::Subnet::Id>\",\n    Description=\"Comma separated list of subnets to position the ELBs in\",\n))\n\ncname_entry = t.add_parameter(Parameter(\n    \"CnameEntry\",\n    Type=\"String\",\n    Description=\"The cname entry for the component\"\n))\n\ndomain_base = t.add_parameter(Parameter(\n    \"DomainNameBase\",\n    Type=\"String\",\n    Description=(\n        \"Base domain name (ending with a '.') \"\n        \"under which new DNS entries are added\"\n    ),\n))\n\nrole = t.add_resource(iam.Role(\n    \"ComponentRole\",\n    Path=\"/\",\n    AssumeRolePolicyDocument={\n        \"Statement\": [{\n            \"Effect\": \"Allow\",\n            \"Action\": [\"sts:AssumeRole\"],\n            \"Principal\": {\"Service\": [\"ec2.amazonaws.com\"]}\n        }],\n    }\n))\n\ninstance_profile = t.add_resource(iam.InstanceProfile(\n    \"ComponentInstanceProfile\",\n    Path=\"/\",\n    Roles=[Ref(role)]\n))\n\nelb_sg = t.add_resource(ec2.SecurityGroup(\n    \"ELBSecurityGroup\",\n    VpcId=Ref(vpc_id),\n    GroupDescription=\"Only allow public traffic on 443\",\n    SecurityGroupIngress=[\n        ec2.SecurityGroupRule(\n            IpProtocol=\"tcp\",\n            FromPort=\"443\",\n            ToPort=\"443\",\n            CidrIp=\"0.0.0.0/0\",\n        )\n    ],\n    SecurityGroupEgress=[]\n))\n\nelb = t.add_resource(elasticloadbalancing.LoadBalancer(\n    'ElasticLoadBalancer',\n    Subnets=Ref(public_subnets),\n    CrossZone=True,\n    SecurityGroups=[Ref(elb_sg)],\n    Listeners=[\n        elasticloadbalancing.Listener(\n            LoadBalancerPort=\"443\",\n            InstancePort=\"7443\",\n            Protocol=\"tcp\",\n            InstanceProtocol=\"tcp\"\n        ),\n    ],\n    HealthCheck=elasticloadbalancing.HealthCheck(\n        Target=HEALTH_CHECK_TARGET,\n        HealthyThreshold=\"3\",\n        UnhealthyThreshold=\"3\",\n        Interval=\"15\",\n        Timeout=\"10\",\n    )\n))\n\ncomponent_dns = t.add_resource(RecordSetType(\n    \"ComponentDNS\",\n    HostedZoneName=Ref(domain_base),\n    Comment=\"CNAME redirect the component ELB\",\n    Name=Fn.Join(\".\", [\n        Ref(cname_entry),\n        Ref(domain_base)\n    ]),\n    Type=\"CNAME\",\n    TTL=\"60\",\n    ResourceRecords=[Fn.GetAtt(elb, \"DNSName\")]\n))\n\nasg_security_group = t.add_resource(ec2.SecurityGroup(\n    \"ASGSecurityGroup\",\n    VpcId=Ref(vpc_id),\n    GroupDescription=\"Security group for the ASG\",\n    SecurityGroupIngress=[\n        ec2.SecurityGroupRule(\n            IpProtocol=\"tcp\",\n            FromPort=\"7080\",\n            ToPort=\"7080\",\n            SourceSecurityGroupId=Ref(elb_sg)\n        ),\n        ec2.SecurityGroupRule(\n            IpProtocol=\"tcp\",\n            FromPort=\"7443\",\n            ToPort=\"7443\",\n            SourceSecurityGroupId=Ref(elb_sg)\n        )\n    ],\n    SecurityGroupEgress=[]\n))\n\nlaunch_conf = t.add_resource(LaunchConfiguration(\n    \"ComponentLaunchConfiguration\",\n    KeyName=Ref(key_pair_name),\n    IamInstanceProfile=Ref(instance_profile),\n    ImageId=Ref(image_id),\n    EbsOptimized=False,\n    InstanceMonitoring=False,\n    SecurityGroups=[\n        Ref(bastion_access_sg),\n        Ref(asg_security_group)\n    ],\n    InstanceType=Ref(instance_type)\n))\n\ncomponent_asg = t.add_resource(AutoScalingGroup(\n    \"ComponentAutoScalingGroup\",\n    UpdatePolicy=UpdatePolicy(\n        AutoScalingRollingUpdate=AutoScalingRollingUpdate(\n            PauseTime=Ref(update_pause_time),\n            MaxBatchSize=Ref(update_max_batch_size),\n            MinInstancesInService=Ref(update_min_in_service),\n        )\n    ),\n    MinSize=Ref(min_size),\n    MaxSize=Ref(max_size),\n    VPCZoneIdentifier=Ref(private_subnets),\n    LaunchConfigurationName=Ref(launch_conf),\n    LoadBalancerNames=[Ref(elb)]\n))\n\ntemplate = 
t.to_json()\nif len(sys.argv) > 1:\n    open(sys.argv[1], \"w\").write(template + \"\\n\")\nelse:\n    print(template)\n","sub_path":"infrastructure/src/asg.py","file_name":"asg.py","file_ext":"py","file_size_in_byte":6584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"515389496","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy_xici.items import ScrapyXiciItem\n\nclass XiciIpSpider(scrapy.Spider):\n    name = 'xici_ip'\n    allowed_domains = ['xicidaili.com']\n    start_urls = ['http://www.xicidaili.com/nn/']\n    def start_requests(self):\n        urls = []\n        for i in range(1,3):\n            url = scrapy.Request('http://www.xicidaili.com/nn/%s'%i)\n            urls.append(url)\n        return urls\n    def parse(self, response):\n        reqs = response.xpath('//*[@id=\"ip_list\"]')\n        req = reqs[0].xpath('tr')\n        items = []\n        for i in req[1:]:\n            xici_item = ScrapyXiciItem()\n            xici_item['IP'] = i.xpath('td[2]/text()')[0].extract()\n            xici_item['PORT'] = i.xpath('td[3]/text()')[0].extract()\n            xici_item['POSTION'] = i.xpath('string(td[4])')[0].extract().strip()\n            xici_item['TYPE'] = i.xpath('td[5]/text()')[0].extract()\n            xici_item['ATTR'] = i.xpath('td[6]/text()')[0].extract()\n            xici_item['SPEED'] = i.xpath('td[7]/div/@title').re('\\d{0,2}\\.\\d{0,}')[0]\n            xici_item['LAST_CHECK_TIME'] = i.xpath('td[10]/text()')[0].extract()\n            items.append(xici_item)\n        return items\n","sub_path":"scrapy_xici/spiders/xici_ip.py","file_name":"xici_ip.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"298605980","text":"import requests\n\n\ndef list(event, context):\n    author = event['queryStringParameters']['author'] \n\n    url = 'http://winchatty.com/v2/search?author=' + author\n    response = requests.get(url) \n\n    return {\n        \"statusCode\": response.status_code,\n        \"body\": response.text\n    }\n","sub_path":"chatty/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"596320603","text":"import time\nimport requests\nimport numpy as np\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\ndef scraping_user_info(store, number):\n    data = []\n    headers = {\n        'Cookie':'xxxxxxxxxx',\n        'Host':'www.dianping.com',\n        'Referer': 'http://www.dianping.com/shop/%s/review_all/p1'%number,\n        'User-Agent': 'xxxxxxxxxx'\n    }\n\n    print(store, number)\n    page = 1\n    while page:\n        url = 'http://www.dianping.com/shop/%s/review_all/p%s'%(number, page)\n        html = requests.get(url=url, headers = headers).text\n        soup = BeautifulSoup(html, 'lxml')\n\n        try:\n            page = soup.find('div', class_='reviews-pages').find('a',class_='NextPage').get('data-pg')\n        except:\n            page = False\n\n        main_reviews = soup.find_all('div', class_='main-review')\n        for main_review in main_reviews:\n            # Name of the current user\n            name = main_review.find('div', class_=\"dper-info\").find('a', class_='name').contents[0].strip()\n            # Set the retry counter\n            retries = 0\n            # Guard against network failures by allowing up to three request attempts\n            while retries < 3:\n                try:\n                    # Link to the current user's profile page on Dianping\n                    user_link = 'http://www.dianping.com' + main_review.find('a', class_='name').get('user_link')\n                    html_sub = requests.get(url=user_link, headers = headers).text\n                    soup_sub = BeautifulSoup(html_sub, 'lxml')\n                    # Pause one second after each visit to a profile page\n                    time.sleep(1)\n                    try:\n                        gender = soup_sub.find('span', class_='user-groun').contents[0].get('class')[0]\n                    except AttributeError as e:\n                        # If the current user has not set gender info, use a null value\n                        gender = np.nan\n                    print(gender)\n                    # The current user's city of residence\n                    cities = 
soup_sub.find('span', class_='user-groun').contents[-1]\n                    # The current user's registration time\n                    regis_times = soup_sub.find('div', class_='user-time').find_all('p')[-1].contents[-1]\n\n                except AttributeError as e:\n                    retries += 1\n                    # If the profile page request fails, set city and registration time to null\n                    cities = np.nan\n                    regis_times = np.nan\n                else:\n                    # Once the profile page request succeeds and the info is fetched, stop retrying\n                    break\n            datum = [{'names':name, 'gender':gender, 'cities':cities, 'regis_times':regis_times, 'user_links':user_link}]\n            data = data + datum\n        time.sleep(2)\n    df = pd.DataFrame(data)\n    df.to_csv('%s_user_info.csv'%store, index=False, encoding='utf8')\n","sub_path":"Python/scraping_user_info.py","file_name":"scraping_user_info.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"346417327","text":"\"\"\"Test transpiler edge cases.\"\"\"\nfrom reasoner_transpiler.cypher import get_query\nfrom .fixtures import fixture_database\n\n\ndef test_categories(database):\n    \"\"\"Test multiple categories.\"\"\"\n    qgraph = {\n        \"nodes\": {\"n0\": {\"categories\": [\n            \"biolink:Disease\",\n            \"biolink:Gene\",\n        ]}},\n        \"edges\": dict(),\n    }\n    output = list(database.run(get_query(qgraph)))[0]\n    assert len(output['results']) == 8\n\n\ndef test_empty(database):\n    \"\"\"Test empty qgraph.\"\"\"\n    qgraph = {\n        \"nodes\": dict(),\n        \"edges\": dict(),\n    }\n    output = list(database.run(get_query(qgraph)))[0]\n    assert len(output[\"results\"]) == 1\n    assert output[\"results\"][0][\"node_bindings\"] == dict()\n    assert output[\"results\"][0][\"edge_bindings\"] == dict()\n    assert output[\"knowledge_graph\"][\"nodes\"] == []\n    assert output[\"knowledge_graph\"][\"edges\"] == []\n\n\ndef test_category_none(database):\n    \"\"\"Test node with type None.\"\"\"\n    qgraph = {\n        \"nodes\": {\n            \"n0\": {\n                \"ids\": \"MONDO:0005148\",\n                \"categories\": None,\n            }\n        },\n        \"edges\": dict(),\n    }\n    cypher = get_query(qgraph)\n    output = list(database.run(cypher))[0]\n    assert len(output[\"results\"]) == 1\n\n\ndef test_relation_none(database):\n    \"\"\"Test edge with relation None.\"\"\"\n    qgraph = {\n        \"nodes\": {\n            \"n0\": {\n                \"categories\": \"biolink:Disease\",\n            },\n            \"n1\": {\n                \"categories\": \"biolink:Gene\",\n            },\n        },\n        \"edges\": {\n            \"e01\": {\n                \"subject\": \"n0\",\n                \"object\": \"n1\",\n                \"relation\": None,\n            }\n        },\n    }\n    cypher = get_query(qgraph)\n    output = list(database.run(cypher))[0]\n    assert len(output[\"results\"]) == 5\n\n\ndef test_qnode_addl_null(database):\n    \"\"\"Test qnode with null-valued additional property.\"\"\"\n    qgraph = {\n        \"nodes\": {\n            \"n0\": {\n                \"categories\": \"biolink:Disease\",\n            },\n            \"n1\": {\n                \"categories\": \"biolink:Gene\",\n                \"chromosome\": None,\n            },\n        },\n        \"edges\": {\n            \"e01\": {\n                \"subject\": \"n0\",\n                \"object\": \"n1\",\n            }\n        },\n    }\n    cypher = get_query(qgraph)\n    output = list(database.run(cypher))[0]\n    assert len(output[\"results\"]) == 5\n\n\ndef test_predicate_none(database):\n    \"\"\"Test edge with predicate None.\"\"\"\n    qgraph = {\n        \"nodes\": {\n            \"n0\": {\n                \"categories\": \"biolink:Disease\",\n            },\n            \"n1\": {\n                \"categories\": \"biolink:Gene\",\n            },\n        },\n        \"edges\": {\n            \"e01\": {\n                \"subject\": \"n0\",\n                \"object\": \"n1\",\n                \"predicates\": None,\n            }\n        },\n    }\n    cypher = get_query(qgraph)\n    output = list(database.run(cypher))[0]\n    assert len(output[\"results\"]) == 5\n\n\ndef test_fancy_key(database):\n    \"\"\"Test qnode/qedge keys with unusual characters.\"\"\"\n    qgraph = {\n        \"nodes\": {\n            \"type-2 diabetes\": {\n                \"categories\": \"biolink:Disease\",\n            },\n            \"n1\": 
{\n                \"categories\": \"biolink:Gene\",\n            },\n        },\n        \"edges\": {\n            \"interacts with\": {\n                \"subject\": \"type-2 diabetes\",\n                \"object\": \"n1\",\n            }\n        },\n    }\n    cypher = get_query(qgraph)\n    output = list(database.run(cypher))[0]\n    assert len(output[\"results\"]) == 5\n\n\ndef test_backwards_predicate(database):\n    \"\"\"Test an extra backwards predicate.\"\"\"\n    qgraph = {\n        \"nodes\": {\n            \"type-2 diabetes\": {\n                \"id\": \"MONDO:0005148\",\n                \"categories\": \"biolink:Disease\",\n            },\n            \"drug\": {\n                \"categories\": \"biolink:ChemicalSubstance\",\n            },\n        },\n        \"edges\": {\n            \"related to\": {\n                \"subject\": \"type-2 diabetes\",\n                \"object\": \"drug\",\n                \"predicates\": [\"biolink:related_to\", \"biolink:treats\"]\n            }\n        },\n    }\n    cypher = get_query(qgraph)\n    output = list(database.run(cypher))[0]\n    assert len(output[\"results\"]) == 3\n\n\ndef test_index_usage_single_labels():\n    \"\"\"\n    When a single label is used, check that the id index for that node type is used\n    \"\"\"\n    qgraph = {\n        \"nodes\": {\n            \"n0\": {\n                \"ids\": [\"MONDO:0005148\"],\n                \"categories\": \"biolink:Disease\",\n            }\n        },\n        \"edges\": {}\n    }\n    cypher = get_query(qgraph, **{\"use_hints\": True})\n    # superclass node_id is suffixed with _superclass\n    assert \"USING INDEX `n0_superclass`:`biolink:Disease`(id)\" in cypher\n\n\ndef test_index_usage_multiple_labels():\n    \"\"\"\n    When multiple labels are used, the `biolink:NamedThing` index should be used\n    \"\"\"\n    qgraph = {\n        \"nodes\": {\n            \"n0\": {\n                \"ids\": [\"MONDO:0005148\"],\n                \"categories\": [\"biolink:Disease\", \"biolink:PhenotypicFeature\"],\n            }\n        },\n        \"edges\": {}\n    }\n    cypher = get_query(qgraph, **{\"use_hints\": True})\n    # superclass node_id is suffixed with _superclass\n    assert \"USING INDEX `n0_superclass`:`biolink:NamedThing`(id)\" in cypher","sub_path":"tests/test_edge_cases.py","file_name":"test_edge_cases.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"207984527","text":"from assets.resnet import ResNet50\nimport paddle.fluid as fluid\nimport paddle\nimport numpy as np\nimport sys\nsys.path.append('..')\nimport interpretdl as it\nfrom interpretdl.data_processor.readers import preprocess_image, read_image\nfrom interpretdl.data_processor.visualizer import visualize_overlay\nfrom PIL import Image\n\n\ndef int_grad_example():\n    def paddle_model(data):\n        class_num = 1000\n        model = ResNet50()\n        logits = model.net(input=data, class_dim=class_num)\n        probs = fluid.layers.softmax(logits, axis=-1)\n        return probs\n\n    img_path = 'assets/fireboat.png'\n    #https://github.com/PaddlePaddle/models/tree/release/1.8/PaddleCV/image_classification\n    ig = it.IntGradCVInterpreter(paddle_model, \"assets/ResNet50_pretrained\",\n                                 True)\n    gradients = ig.interpret(\n        img_path,\n        label=None,\n        baseline='random',\n        steps=50,\n        num_random_trials=2,\n        visual=True,\n        save_path='ig_test.jpg')\n\n\nif __name__ == '__main__':\n    int_grad_example()\n","sub_path":"tutorials/int_grad_tutorial_cv.py","file_name":"int_grad_tutorial_cv.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"345202894","text":"import config\nimport requests\nimport connection\nimport json\n\nmongo = connection.mongoconnection\nredip = connection.redisip\nredomain = connection.redisdomain\nmongodb = mongo.DomainIP\nip_collection = mongodb['ipList']\ndomain_collection = mongodb['domainList']\nsession = requests.Session()\n\ndef 
getipdomain(obj,mode):\n    session.headers = {'User-Agent': config.USER_AGENT,\"x-apikey\": config.VIRUSTOTAL_API_KEY,}\n    if mode == 0:\n        url = \"https://www.virustotal.com/api/v3/domains/\"+obj['domain']\n        \n    else:\n        url = \"https://www.virustotal.com/api/v3/ip_addresses/\"+obj['ip']\n    r = session.get(url)\n    if r.status_code == 200:\n        data = r.json()\n    else:\n        return None\n    result = {}\n    if mode == 0:\n        result['domain'] = obj['domain']\n    else:\n        result['ip'] = obj['ip']\n    if data['data']['attributes']['last_analysis_stats']['malicious']>0:\n        result['status'] = 'malicious'\n    else:\n        result['status'] = 'clean'\n    result['info'] = data['data']['attributes']['last_analysis_stats']\n    return result\n\ndef add_redis(result, mode):\n    if '_id' in result:\n        del result['_id']\n    if mode == 0:\n        redomain.set(result['domain'], json.dumps(result))\n    else:\n        redip.set(result['ip'],json.dumps(result))\n\ndef search_redis(obj,mode):\n    if mode == 0:\n        result = redomain.get(obj['domain'])\n    else:\n        result = redip.get(obj['ip'])\n    if result == None:\n        return False, None\n    return True, json.loads(result.decode())\n\ndef add_db(result,mode):\n    if mode == 0:\n        _id = domain_collection.insert_one(result).inserted_id\n    else:\n        _id = ip_collection.insert_one(result).inserted_id\n\ndef search_db(obj,mode):\n    if mode == 0:\n        result = domain_collection.find_one({'domain': obj['domain']}, {'_id': False})\n    else:\n        result = ip_collection.find_one({'ip': obj['ip']}, {'_id': False})\n    return result\n\ndef checkIpDomain(obj,mode):\n    flag, result = search_redis(obj,mode)\n    if not flag:\n        result = search_db(obj,mode)\n        if result == None:\n            result = getipdomain(obj,mode)\n            if result == None:\n                return \"Not Found\"\n            add_db(result,mode)\n        add_redis(result, mode)\n    return result\n\n# obj = {'domain':'facebook.com'}\n# print(getipdomain(obj,0))\n","sub_path":"ipdomain.py","file_name":"ipdomain.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"171653683","text":"import numpy as np \nimport pandas as pd \n\ndef acquire_semicon():\n    df = pd.read_csv('secom.data', sep=' ', header=None)\n    labels = pd.read_csv('secom_labels.data', sep=' ', header=None)\n    \n    df['timestamp'] = labels[1]\n    df['defect'] = labels[0]\n    df.defect = df.defect.apply(lambda x: 0 if x == -1 else 1)\n    return df\n\n","sub_path":"acquire.py","file_name":"acquire.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"481721739","text":"import numpy as np\nimport tensorflow as tf\n\n\nclass DssmNetwork:\n\n    def __init__(self, input_size, output_size, hidden_sizes, rand_seed=42):\n        self.input_size = input_size\n        self.output_size = output_size\n        self.hidden_sizes = hidden_sizes\n        self.session = None\n        self.yhat = None\n        self.y = None\n        self.placeholderX = None\n        self.session = tf.Session()\n        tf.set_random_seed(rand_seed)\n\n\n    def _prepare_input_data(self, data):\n        number_of_samples = len(data)\n        X = np.zeros((number_of_samples, self.input_size))\n        for i, sample in enumerate(data):\n            if isinstance(sample, tuple):\n                X[i,:] = sample[0]\n            else:\n                X[i,:] = sample\n        return X\n\n\n    def _prepare_output_data(self, data):\n        Y = np.array([sample[1] for sample in data])\n        return Y\n\n\n    def _init_weights(self, shape):\n        \"\"\" Weight initialization \"\"\"\n        weights = tf.random_normal(shape, stddev=0.1)\n        return tf.Variable(weights)\n\n\n    def _init_biases(self):\n        biases = []\n        for i in 
range(len(self.hidden_sizes) + 1):\n if i == len(self.hidden_sizes):\n in_size = self.output_size\n else:\n in_size = self.hidden_sizes[i]\n var = tf.Variable(tf.random_normal([in_size]))\n biases.append(var)\n return biases\n\n\n def _init_weights_array(self):\n weights_array = []\n for i in range(len(self.hidden_sizes) + 1):\n if i == 0:\n in_size = self.input_size\n else:\n in_size = self.hidden_sizes[i - 1]\n if i == len(self.hidden_sizes):\n out_size = self.output_size\n else:\n out_size = self.hidden_sizes[i]\n\n weights = self._init_weights((in_size, out_size))\n weights_array.append(weights)\n return weights_array\n\n\n def forwardprop(self, X, weights_array, biases_array):\n previous_layer = X\n for i, (weights, biases) in enumerate(zip(weights_array, biases_array)):\n if i == len(weights_array) - 1:\n # layer = tf.matmul(previous_layer, weights, name='output') # The \\varphi function\n layer = tf.add(tf.matmul(previous_layer, weights), biases, name='output')\n else:\n layer = tf.add(tf.matmul(previous_layer, weights), biases)\n # layer = tf.nn.sigmoid(tf.matmul(previous_layer, weights))\n previous_layer = layer\n return layer\n\n\n def init_base_variables(self):\n self.placeholderX = tf.placeholder(\"float\", shape=(None, self.input_size))\n self.y = tf.placeholder(\"float\", shape=[None, self.output_size])\n\n weights_array = self._init_weights_array()\n biases_array = self._init_biases()\n\n # Forward propagation\n self.final_vector1 = self.forwardprop(self.placeholderX, weights_array, biases_array)\n self.final_vector2 = self.forwardprop(self.placeholderX, weights_array, biases_array)\n # self.cosine = tf.losses.cosine_distance(self.final_vector1, self.final_vector2)\n\n\n def fit(self, dataset, number_of_epochs=20):\n Xdata = self._prepare_input_data(dataset)\n Ydata = self._prepare_output_data(dataset)\n\n self.init_base_variables()\n\n # Backward propagation\n # cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.yhat))\n # self.cost = tf.losses.cosine_distance(self.final_vector1, self.final_vector2, dim=1)\n self.cost = tf.reduce_sum(tf.multiply(self.final_vector1, self.final_vector2))\n updates = tf.train.GradientDescentOptimizer(0.01).minimize(self.cost)\n # updates = tf.train.AdamOptimizer(0.01).minimize(cost)\n\n # predict = tf.argmax(self.yhat, axis=1)\n\n # Run SGD\n init = tf.global_variables_initializer()\n self.session.run(init)\n\n for epoch in range(number_of_epochs):\n # Train with each example\n for i in range(len(Xdata)):\n result = self.session.run(updates, feed_dict={self.placeholderX: Xdata[i: i + 1],\n self.y: Ydata[i: i + 1]})\n # train_accuracy = np.mean(np.argmax(Ydata, axis=1) == self.session.run(predict,\n # feed_dict={self.placeholderX: Xdata, self.y: Ydata}))\n #\n # if epoch % 10 == 0:\n # print(\"Epoch = %d, train accuracy = %.2f%%\" % (epoch + 1, 100. 
* train_accuracy))\n\n\n def predict(self, dataset):\n # self.placeholderX = tf.placeholder(\"float\", shape=(None, self.input_size + 1), name='input')\n # predict = tf.argmax(self.yhat, axis=1)\n # init = tf.global_variables_initializer()\n # self.session.run(init)\n\n # output_layer = tf.get_default_graph().get_operation_by_name(name='output')\n output_layer = tf.get_default_graph().get_tensor_by_name('output:0')\n output = tf.nn.softmax(self.yhat)\n Xdata = self._prepare_input_data(dataset)\n\n result = self.session.run(output, feed_dict={self.placeholderX: Xdata})\n return result\n\n\n def save_model(self, save_path, model_name):\n if not save_path[-1:] in ['\\\\', '/']:\n save_path += '/'\n saver = tf.train.Saver()\n saver.save(self.session, save_path + model_name)\n\n\n def load_model(self, save_path, model_name):\n if not save_path[-1:] in ['\\\\', '/']:\n save_path += '/'\n saver = tf.train.import_meta_graph(save_path + model_name + '.meta')\n saver.restore(self.session, tf.train.latest_checkpoint(save_path))\n","sub_path":"networks/dssm_network.py","file_name":"dssm_network.py","file_ext":"py","file_size_in_byte":5722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"206460047","text":"from engine.globs.event_dispatcher import EventDispatcher\r\nfrom .descriptive_area import DescriptiveArea\r\nfrom .letter import LetterElement\r\nfrom engine.globs import TEXT_FG\r\nfrom pygame import font\r\n\r\n\r\nclass InventoryElement(LetterElement):\r\n active = True\r\n item = None\r\n description = None\r\n idx = 0\r\n\r\n def __init__(self, parent, item):\r\n\r\n self.item = item\r\n self.img_uns = self._create_icon_stack(21, 21, False, parent.entity)\r\n self.img_sel = self._create_icon_stack(33, 33, True, parent.entity)\r\n\r\n super().__init__(parent, self.item.nombre, None)\r\n self.description = DescriptiveArea(self, item.efecto_des)\r\n\r\n def _create_icon_stack(self, w, h, count, entity):\r\n image, _rect = self._crear_base(w, h)\r\n if count:\r\n fuente = font.Font('engine/libs/Verdana.ttf', 12)\r\n cant = entity.inventario.cantidad(self.item)\r\n render = fuente.render(str(cant), True, TEXT_FG)\r\n renderect = render.get_rect(bottom=_rect.bottom + 1, right=_rect.right - 1)\r\n image.blit(render, renderect)\r\n\r\n iconrect = self.item.image.get_rect(center=_rect.center)\r\n image.blit(self.item.image, iconrect)\r\n\r\n return image\r\n\r\n def command(self):\r\n if self.item is not None and self.item.tipo == 'consumible':\r\n value = self.item.usar(self.parent.entity)\r\n self.img_sel = self._create_icon_stack(33, 33, True, self.parent.entity)\r\n self.image = self.img_sel\r\n if value == 0:\r\n self.parent.del_item_from_cascade(self.nombre, 'Consumibles')\r\n elif self.item.tipo == 'equipable':\r\n self.parent.overwritten = True\r\n self.parent.salir()\r\n EventDispatcher.trigger('OpenMenu', 'Item', {'value': 'Equipo', \"select\": self.item.espacio})\r\n","sub_path":"engine/UI/circularmenus/elements/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"590177886","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n 
name='Genre',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('name', models.CharField(max_length=255)),\n ('parent', models.ForeignKey(related_name='sub_genres', null=True, to='haber.Genre')),\n ],\n ),\n migrations.CreateModel(\n name='News',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('title', models.CharField(max_length=255)),\n ('description', models.TextField(blank=True)),\n ('image', models.URLField(blank=True)),\n ('genres', models.ManyToManyField(related_name='news', to='haber.Genre')),\n ],\n ),\n migrations.CreateModel(\n name='Person',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('name', models.CharField(max_length=255)),\n ('date_of_birth', models.DateField()),\n ('gender', models.CharField(max_length=1, choices=[('m', 'Male'), ('f', 'Female'), ('o', 'Other')])),\n ],\n ),\n migrations.AddField(\n model_name='news',\n name='reporter',\n field=models.ForeignKey(related_name='news', to='haber.Person'),\n ),\n migrations.AddField(\n model_name='news',\n name='scored_users',\n field=models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL),\n ),\n ]\n","sub_path":"haber/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"545144755","text":"from django.urls import re_path, path\nfrom .views import index, output_xls, table, people, people_one, PeopleFilterView, pandas_table, PersonList\napp_name=\"excel\"\n\nurlpatterns = [\n path('', index, name='index'),\n #path('table/', table, name='index'),\n path('down/', output_xls, name='down'),\n path('people/', people, name=\"people\"),\n path('peopledb/', people_one, name=\"people\"),\n path('person/', PersonList.as_view(), name='person'),\n path('pandas/', pandas_table, name='pandas'),\n # path(\"filter/\", SimpleFilteredView.as_view(), name=\"filter\"),\n path(\"filter/\", PeopleFilterView.as_view(), name=\"filter\"),\n]\n","sub_path":"Vue-RssReader/excel/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"522884466","text":"# --- <=> INTENGRANTES <=> ---\r\n# Ibrahim Zavala Hernández \r\n# Julio Gerardo Cazarez González \r\n# Pedro Saldaña Vázquez \r\n\r\n# --- <=> MODULOS <=> ---\r\nimport argparse\r\nimport cifrado\r\nimport descifrado\r\nimport crackeo\r\n\r\n# --- <=> ARGUMENTOS <=> ---\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--mode', type=str, dest='modo', help='Eleccion de Modo: encriptar || desencriptar || crackear')\r\nparser.add_argument('--message', type=str, dest='mensaje', help='Mensaje a Encriptar || Desencriptar || Crakear')\r\nparser.add_argument('--key', type=str, dest='clave', default='ClaveDefault', help='(Opcional) Clave para Encriptar || Desencriptar')\r\nargs=parser.parse_args()\r\n\r\nif args.modo == 'encriptar':\r\n print(\"\\n--- <=> ENCRIPTACION DE MENSAJE <=> ---\")\r\n mensaje = args.mensaje\r\n ckey = args.clave\r\n cifrado.encriptado(mensaje, ckey)\r\nelif args.modo == 'desencriptar':\r\n print(\"\\n--- <=> DESENCRIPTACION DE MENSAJE <=> ---\")\r\n mensaje = args.mensaje\r\n ckey = args.clave\r\n descifrado.desencriptado(mensaje, ckey)\r\nelif args.modo == 'crackear':\r\n print(\"\\n--- <=> CRACKEO DE 
MENSAJE <=> ---\")\r\n mensaje = args.mensaje\r\n crackeo.crackeado(mensaje)\r\n\r\nelse:\r\n print(\"usage: MainScript.py [-h] [--mode MODO] [--message MENSAJE] [--key CLAVE]\")","sub_path":"Phyton/E12PC/MainScript.py","file_name":"MainScript.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"551189449","text":"import input_generator as gen\n#import algorithms.scan_edf\nimport analytics as aly\n\nfor i in range(10):\n input = gen.get_input()\n count = [0 for i in range(3600 // 5)]\n for x in input:\n count[(x['timestamp'] // 5) - 1] += 1\n\n aly.get_histogram(count, title = 'request per 5 seconds', xlabel = 'request', ylabel = 'count', filename = f'{ i + 1 }')","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"301292287","text":"import os\nimport fire\nimport pickle\n\nimport numpy as np\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim import SGD\n\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom transformers import GPT2LMHeadModel, CTRLLMHeadModel, GPT2TokenizerFast, CTRLTokenizer, AdamW, get_linear_schedule_with_warmup\n\nfrom dataset import TextDataset\nfrom sample import sample\n\nimport wandb\n\nMODEL_CLASSES = {\n 'gpt2': (GPT2LMHeadModel, GPT2TokenizerFast),\n 'ctrl': (CTRLLMHeadModel, CTRLTokenizer)\n}\n\n\n# @profile\ndef finetune(train_dataset_path, val_dataset_path, save_dir, model_type, checkpoint, optimizer, lr, batch_size, gradient_accumulation_steps, epochs, accelerator, logging_steps, histogram_steps, save_steps, n_samples, sample_len, temperature, top_k, top_p, repetition_penalty, debug):\n wandb.init(project=\"transformer-experiments\")\n\n if save_dir == None:\n save_dir = wandb.run.dir\n\n if debug:\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=('localhost', 5678),\n redirect_output=True)\n ptvsd.wait_for_attach()\n breakpoint()\n\n if accelerator == 'TPU':\n import torch_xla.core.xla_model as xm\n import torch_xla.distributed.parallel_loader as pl\n\n device = xm.xla_device()\n\n elif accelerator == 'GPU':\n device = torch.device(\n \"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n from apex import amp\n\n elif accelerator == 'CPU':\n device = torch.device(\"cpu\")\n\n train_dataset = TextDataset(train_dataset_path)\n val_dataset = TextDataset(val_dataset_path)\n\n train_dataloader = torch.utils.data.DataLoader(\n train_dataset, batch_size=batch_size, shuffle=False, num_workers=4)\n val_dataloader = torch.utils.data.DataLoader(\n val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)\n\n if accelerator == 'TPU':\n # from: https://github.com/pytorch/xla/issues/1191\n def len_parallelloader(self):\n return len(self._loader._loader)\n pl.PerDeviceLoader.__len__ = len_parallelloader\n\n train_dataloader = pl.ParallelLoader(\n train_dataloader, [device]).per_device_loader(device)\n\n model, tokenizer = MODEL_CLASSES[model_type]\n\n if model_type != 'test':\n model = model.from_pretrained(checkpoint).to(device)\n tokenizer = tokenizer.from_pretrained(checkpoint)\n\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\"params\": [p for n, p in model.named_parameters() if not any(\n nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n {\"params\": [p for n, p in 
model.named_parameters() if any(\n nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n\n train_steps = int(len(train_dataloader) /\n gradient_accumulation_steps * epochs)\n\n if optimizer == 'AdamW':\n optimizer = AdamW(optimizer_grouped_parameters, lr=lr, eps=1e-8)\n elif optimizer == 'SGD':\n optimizer = SGD(optimizer_grouped_parameters, lr=lr)\n\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(\n 0.1 * train_steps), num_training_steps=train_steps)\n\n if os.path.exists(checkpoint):\n print('Loading optimizer and scheduler')\n\n optimizer.load_state_dict(torch.load(\n os.path.join(checkpoint, 'optimizer.pt')))\n scheduler.load_state_dict(torch.load(\n os.path.join(checkpoint, 'scheduler.pt')))\n\n if accelerator == 'GPU':\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=\"O1\", loss_scale=\"dynamic\")\n\n wandb.watch(model, log='parameters')\n\n gradients = {}\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n if os.path.exists(checkpoint):\n global_step = int(checkpoint.split('-')[-1].split('/')[0])\n\n epochs_trained = global_step // (len(train_dataloader) //\n gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (\n len(train_dataloader) // gradient_accumulation_steps) * gradient_accumulation_steps\n\n for epoch in range(epochs_trained, epochs):\n train_loss = 0\n val_loss = 0\n\n print(f\"Epoch: {epoch}\")\n\n model.train()\n for i, batch in tqdm(enumerate(train_dataloader), total=int(len(train_dataset) / batch_size)):\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n inputs, labels = batch.to(device), batch.to(device)\n\n out = model(inputs, labels=labels)\n loss = out[0]\n\n loss = loss / gradient_accumulation_steps\n\n train_loss += loss.item()\n\n if accelerator == 'GPU':\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n if (i + 1) % gradient_accumulation_steps == 0:\n if accelerator == 'GPU':\n torch.nn.utils.clip_grad_norm_(\n amp.master_params(optimizer), 1)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1)\n\n if accelerator == 'TPU':\n xm.optimizer_step(optimizer, barrier=True)\n else:\n optimizer.step()\n\n scheduler.step()\n\n if global_step % logging_steps == 0:\n wandb.log({\"train_loss\": loss.item() * gradient_accumulation_steps,\n \"learning_rate\": scheduler.get_lr()[0]}, step=global_step)\n\n if global_step % histogram_steps == 0:\n for name, param in model.named_parameters():\n if param.grad is not None:\n try:\n gradients[f\"gradients/{name}\"] = wandb.Histogram(\n param.grad.detach().cpu().numpy())\n except:\n pass\n\n wandb.log(gradients, step=global_step)\n\n optimizer.zero_grad()\n\n global_step += 1\n\n # Must be in grad_accum block b/c if it is > 0, the model will get saved multiple times\n if global_step % save_steps == 0:\n print(f'Saving model at global step: {global_step}')\n checkpoint_dir = os.path.join(\n save_dir, f'checkpoint-{global_step}')\n\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n model.save_pretrained(checkpoint_dir)\n tokenizer.save_pretrained(checkpoint_dir)\n torch.save(optimizer.state_dict(), os.path.join(\n checkpoint_dir, 'optimizer.pt'))\n torch.save(scheduler.state_dict(), os.path.join(\n checkpoint_dir, 'scheduler.pt'))\n\n model.eval()\n with torch.no_grad():\n for j, batch in tqdm(enumerate(val_dataloader), total=int(len(val_dataset) / batch_size)):\n inputs, labels = 
batch.to(device), batch.to(device)\n\n out = model(inputs, labels=labels)\n loss = out[0]\n\n val_loss += loss.item()\n\n train_loss /= (i + 1)\n val_loss /= (j + 1)\n\n train_loss *= gradient_accumulation_steps\n\n train_perplexity = torch.exp(torch.tensor(train_loss))\n val_perplexity = torch.exp(torch.tensor(val_loss))\n\n wandb.log({\"train_epoch_loss\": train_loss,\n \"train_epoch_perplexity\": train_perplexity, 'val_epoch_loss': val_loss, 'val_epoch_perplexity': val_perplexity}, step=global_step)\n\n message = f'Finished epoch {epoch} | Train loss: {train_loss} | Train perplexity: {train_perplexity} | Val Loss: {val_loss} | Val Perplexity: {val_perplexity}'\n print(message)\n\n print('Sampling from model:\\n')\n sample(\" \", model, tokenizer, length=sample_len, temperature=temperature,\n top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, n_samples=n_samples)\n print('\\n')\n\n model.save_pretrained(save_dir)\n tokenizer.save_pretrained(save_dir)\n torch.save(optimizer.state_dict(), os.path.join(save_dir, 'optimizer.pt'))\n torch.save(scheduler.state_dict(), os.path.join(save_dir, 'scheduler.pt'))\n\n\ndef tpu(index, train_dataset_path, val_dataset_path, save_dir, model_type, checkpoint, optimizer, lr, batch_size, gradient_accumulation_steps, epochs, accelerator, logging_steps, histogram_steps, save_steps, n_samples, sample_len, temperature, top_k, top_p, repetition_penalty, debug):\n print(index)\n finetune(train_dataset_path, val_dataset_path, save_dir, model_type, checkpoint, optimizer, lr, batch_size, gradient_accumulation_steps, epochs, accelerator,\n logging_steps, histogram_steps, save_steps, n_samples, sample_len, temperature, top_k, top_p, repetition_penalty, debug)\n\n\ndef main(train_dataset_path=None, val_dataset_path=None, save_dir=None, model_type='gpt2', checkpoint='distilgpt2', optimizer='AdamW', lr=5e-5, batch_size=4, gradient_accumulation_steps=1, epochs=1, accelerator='GPU', logging_steps=10, histogram_steps=100, save_steps=100, n_samples=1, sample_len=256, temperature=1, top_k=0, top_p=0, repetition_penalty=1, debug=False, n_cores=1):\n if accelerator == 'CPU' or accelerator == 'GPU':\n finetune(train_dataset_path, val_dataset_path, save_dir, model_type, checkpoint, optimizer, lr, batch_size, gradient_accumulation_steps, epochs, accelerator,\n logging_steps, histogram_steps, save_steps, n_samples, sample_len, temperature, top_k, top_p, repetition_penalty, debug)\n else:\n import torch_xla.core.xla_model as xm\n import torch_xla.distributed.xla_multiprocessing as xmp\n\n xmp.spawn(tpu, args=(train_dataset_path, val_dataset_path, save_dir, model_type, checkpoint, optimizer, lr, batch_size, gradient_accumulation_steps, epochs, accelerator, logging_steps,\n histogram_steps, save_steps, n_samples, sample_len, temperature, top_k, top_p, repetition_penalty, debug), nprocs=n_cores)\n\n\nif __name__ == \"__main__\":\n fire.Fire(main)\n","sub_path":"finetune.py","file_name":"finetune.py","file_ext":"py","file_size_in_byte":10522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"227850113","text":"#!/usr/bin/python3\n\nimport npyscreen\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom questioners import TranslationQuestioner\nfrom configurators import TranslationConfigurator\nfrom forms import SplashScreen\n\n\nclass Examiner(npyscreen.NPSAppManaged):\n \"\"\"Main application class.\n\n Contains all widgets, menus and what not in accordance to npyscreen's 
workings.\"\"\"\n\n def onStart(self):\n \"\"\"Overridden from NPSAppManaged. Sets up Examiner's attributes, much like __init__ would do.\"\"\"\n\n self.create_menus()\n self.engine = create_engine('sqlite:///{}'.format('database.db'))\n Session = sessionmaker(bind=self.engine)\n self.session = Session()\n self.form_by_id = {}\n self.create_forms()\n\n def create_menus(self):\n \"\"\"Creates the root- and submenus to be attached to all forms.\n\n The menu object is attached to Examiner so that it can be passed on to forms.\"\"\"\n\n # Main menu\n self.menu = npyscreen.muNewMenu.NewMenu(name='Menu')\n\n # self.menu.addItem(text='switch', onSelect=self.change_form, shortcut='s')\n self.menu.addItem(text='Exit', onSelect=self.exit, shortcut='e')\n\n def create_forms(self):\n self.splashscreen_id = 'MAIN'\n self.translation_questioner_id = 'translation_questioner'\n # Attributes below are of type weakref.proxy\n self.splash_screen = self.addForm(self.splashscreen_id, SplashScreen, name='asdf', session=self.session, menu=self.menu)\n self.translation_questioner = self.addForm(self.translation_questioner_id,\n TranslationQuestioner,\n my_id=self.translation_questioner_id,\n session=self.session,\n menu=self.menu)\n self.form_by_id[self.translation_questioner_id] = self.translation_questioner\n def exit(self):\n \"\"\"Stops the examiner\"\"\"\n self.switchForm(None)\n\n ####################################################################################################################\n # Start of forms definitions.\n ####################################################################################################################\n\n def configure_translation_questioner(self, testables=None):\n \"\"\"Configures the translation questioner.\"\"\"\n self.translation_questioner.set_testables(testables)\n\n def start_examining(self, form_id):\n \"\"\"Switches to and starts the translation questioner form.\"\"\"\n self.switchForm(form_id)\n self.form_by_id[self.translation_questioner_id].start()\n\n\nif __name__ == '__main__':\n app = Examiner()\n app.run()\n","sub_path":"examiner.py","file_name":"examiner.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"492848099","text":"import os\n\nfeature_extraction_path = os.path.dirname(os.path.realpath(__file__))\nsrc_path = os.path.abspath(os.path.join(feature_extraction_path, os.pardir))\nVQA_path = os.path.abspath(os.path.join(src_path, os.pardir))\nmodels_path = os.path.join(VQA_path, \"models\")\n\ndef get_path(model_name, file_type=\"\"):\n if model_name == \"resnet152-1k-tf\":\n path = os.path.join(models_path, \"resnet152-1k-tf\")\n if file_type == \"graph\":\n return path + \"/ResNet-L152.meta\"\n elif file_type == \"parameters\":\n return path + \"/ResNet-L152.ckpt\"\n elif model_name == \"resnet152-11k-mxnet\":\n path = os.path.join(models_path, \"resnet152-11k-mxnet\")\n return path + \"/resnet-152\"\n elif model_name == \"resnet200-1k-mxnet\":\n path = os.path.join(models_path, \"resnet200-1k-mxnet\")\n return path + \"/resnet-200\"\n elif model_name == \"VGG19-1k-mxnet\":\n path = os.path.join(models_path, \"VGG19-1k-mxnet\")\n return path + \"/vgg19\"\n ","sub_path":"src/feature_extraction/model_path.py","file_name":"model_path.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"455405016","text":"# 기준을 잡는 것이 중요하다. \n# 1. 조건 : 회의가 일찍 끝나야 한다. \n# 2. 
조건 : \n# 따라서, 끝나는 순서대로 정렬을 시켜보자 \nN = int(input())\narr = []\nfor _ in range(N):\n start_end = list(map(int,input().split()))\n arr.append(start_end)\nfor i in range(len(arr)):\n arr[i][0], arr[i][1] = arr[i][1], arr[i][0]\narr.sort()\nend_now = arr[0][0]\nresult = []\n# answer = 1\nresult.append(arr[0]) # 첫번째 끝 값 \nfor i in range(1,len(arr)):\n if arr[i][1] >= end_now:\n result.append(arr[i])\n end_now = arr[i][0]\n # answer += 1\nprint(len(result))\n# print(answer)\n\n\n","sub_path":"python_bms/ALGORITHMS/1021/1931회의실배정.py","file_name":"1931회의실배정.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"502781008","text":"# Lists with words for names processing\n\n\nFLAG_EXECUTIVE_NAME_WORDS = [\n \"ceo\",\n \"cfo\",\n \"cbo\",\n \"cto\",\n \"cso\",\n \"cro\",\n \"cho\",\n \"ir\",\n \"svp\",\n \"vice\"\n \"president\",\n \"chief\",\n \"financial\",\n \"officer\",\n \"executive\",\n \"director\",\n \"member\",\n \"chairman\",\n \"founder\",\n \"cofounder\",\n \"senior\",\n \"manager\",\n \"head\",\n \"principal\"\n]\n\nFLAG_ANALYST_NAME_WORDS = [\n \"division\",\n \"ltd\",\n \"research\",\n \"company\",\n \"bank\",\n \"inc\",\n \"co\",\n \"group\",\n \"llc\"\n]\n\nFLAG_COMPANY_NAME_WORDS = [\n \"morgan\",\n \"goldman\",\n \"citi\",\n \"morning\",\n \"hsbc\"\n]\n\nFLAG_NAME_WORDS = FLAG_EXECUTIVE_NAME_WORDS + FLAG_ANALYST_NAME_WORDS\n\nSTOP_NAME_WORDS = [\n \"okay\",\n \"thank\",\n \"you\",\n \"thanks\",\n \"bye\",\n \"hello\",\n \"good\",\n \"afternoon\",\n \"evening\",\n \"morning\",\n \"night\",\n \"welcome\",\n \"yes\",\n \"when\",\n \"that\",\n \"but\",\n \"about\",\n \"we\",\n \"me\",\n \"our\",\n \"us\",\n \"have\",\n \"are\",\n \"is\",\n \"were\",\n \"was\",\n \"been\"\n]\n\nDROP_STOP_WORDS = [\n \"of\",\n \"and\",\n \"the\",\n \"a\"\n]\n\nALL_STOP_WORDS = STOP_NAME_WORDS + DROP_STOP_WORDS\n\n","sub_path":"nlp_core/words_lists.py","file_name":"words_lists.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"633401953","text":"#!/usr/bin/env python\r\n\r\nimport re\r\n\r\nfrom Usefuls.rsConfig import RSC_II\r\nyoc = RSC_II(\"yoIVV_Config\")\r\n\r\nclass TFFile:\r\n def __init__( self, filename ):\r\n self.filename = filename\r\n self.fh = open( self.filename, \"r\" )\r\n def readline( self ):\r\n line = self.fh.readline()\r\n if line == \"\":\r\n return False\r\n else:\r\n id = re.sub(\"\\n\",\"\", line)\r\n id = re.sub(\"\\r\",\"\", id)\r\n\r\n return id\r\n def __del__( self ):\r\n self.fh.close()\r\nclass TFDic:\r\n\r\n \r\n def getTFSet(self):\r\n tfFile = TFFile(yoc.tf_list)\r\n tfSet = set()\r\n while True:\r\n line = tfFile.readline()\r\n if line == False:\r\n break\r\n else: \r\n tfSet.add(line)\r\n return tfSet","sub_path":"csplugins/trunk/ucsd/rsaito/rs_Progs/rs_Python/rs_Python_Pack/trunk/IVV_Packages/YO_Usefuls/TFDic.py","file_name":"TFDic.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"481333187","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\n\nbrowser = webdriver.Chrome()\nbrowser.get('http://suninjuly.github.io/huge_form.html')\n\n\ninputs = browser.find_elements(By.TAG_NAME, 'input')\nfor input in inputs:\n input.send_keys('KEK')\n\nbutton = browser.find_element(By.CSS_SELECTOR, 
'.btn-default')\nbutton.click()\n","sub_path":"week_1/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"401859450","text":"from urllib.request import urlopen as uReq\nfrom bs4 import BeautifulSoup as soup\n\nmy_url = \"https://www.imdb.com/search/title?title_type=feature&release_date=2008-01-01,2018-01-01&num_votes=5000,&sort=user_rating,desc\"\n\n# opening up connection grabbing the page\nuClient = uReq(my_url)\npage_html = uClient.read()\nuClient.close()\n\n# HTML parsing\npage_soup = soup(page_html, \"html.parser\")\n\n# grabs each product\ncontainers = page_soup.findAll(\"div\",{\"class\":\"lister-item-content\"})\n\nfilename = \"test.csv\"\nf = open(filename, \"w\")\nheaders = \"name, number\\n\"\nf.write(headers)\n\nfor container in containers:\n\n title_container = container.findAll(\"a\", {\"href\":\"/title/tt5963218/?ref_=adv_li_tt\"})\n title_movie = title_container[0].text\n number_container = container.findAll(\"span\", {\"class\":\"lister-item-index unbold text-primary\"})\n number_movie = number_container[0].text\n\n print(\"title_movie: \" + title_movie)\n print(\"number_movie: \" + number_movie)\n\n f.write(title_movie + \",\" +number_movie + \"\\n\")\n\nf.close()\n","sub_path":"python/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"330260121","text":"import glob\nimport numpy\nimport h5py\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", \"--input_files\",type=str,default=None,\n dest=\"input_file\", help=\"names for input files\")\nparser.add_argument(\"-d\", \"--path\",type=str,default='/data/icecube/jmicallef/processed_CNN_files/',\n dest=\"path\", help=\"path to input files\")\nparser.add_argument(\"-n\", \"--name\",type=str,default=None,\n dest=\"name\", help=\"name for output file\")\nparser.add_argument(\"--old_reco\",default=False,action='store_true',\n dest=\"old_reco\",help=\"use flag if concatonating all train, test, val into one file\")\nargs = parser.parse_args()\n\nuse_old_reco = args.old_reco\nfile_name_base = args.path + args.input_file\nif not use_old_reco:\n file_names = sorted(glob.glob(file_name_base))\n print(\"Using %i files with names like %s\"%(len(file_names), file_names[0]))\nelse:\n file_names = file_name_base\n print(\"Using file %s\"%file_names)\n\nname = args.name\nif name is None:\n split_file_name = file_name_base[:-4]\n new_name = split_file_name[0]\n for name in range(1,len(split_file_name)-1):\n new_name = new_name + \"_\" + split_file_name[name]\n new_name += \".testonly.hdf5\"\n output_file = new_name\nelse:\n output_file = args.path + name + \".testonly.hdf5\"\n\n# Put all the test sets together\nY_test_use = None\nX_test_DC_use = None\nX_test_IC_use = None\n\nif use_old_reco:\n f = h5py.File(file_names, 'r')\n Y_test = f['Y_test'][:]\n X_test_DC = f['X_test_DC'][:]\n X_test_IC = f['X_test_IC'][:]\n Y_train = f['Y_train'][:]\n X_train_DC = f['X_train_DC'][:]\n X_train_IC = f['X_train_IC'][:]\n Y_validate = f['Y_validate'][:]\n X_validate_DC = f['X_validate_DC'][:]\n X_validate_IC = f['X_validate_IC'][:]\n reco_test = f['reco_test'][:]\n reco_train = f['reco_train'][:]\n reco_validate = f['reco_validate'][:]\n f.close()\n del f\n\n print(\"Loaded all %i events\"%(Y_test.shape[0]+Y_train.shape[0]+Y_validate.shape[0]))\n\n Y_test_use = 
numpy.concatenate((Y_test,Y_train,Y_validate))\n print(\"Concatted Y\")\n del Y_test\n del Y_train\n del Y_validate\n X_test_DC_use = numpy.concatenate((X_test_DC,X_train_DC,X_validate_DC))\n print(\"Concatted DC\")\n del X_test_DC\n del X_train_DC\n del X_validate_DC\n X_test_IC_use =numpy.concatenate((X_test_IC,X_train_IC,X_validate_IC))\n print(\"Concatted IC\")\n del X_test_IC\n del X_train_IC\n del X_validate_IC\n reco_test_use = numpy.concatenate((reco_test,reco_train,reco_validate))\n del reco_test\n del reco_train\n del reco_validate\n print(\"Concatted reco\")\n\nelse:\n for file in file_names:\n f = h5py.File(file, 'r')\n Y_test = f['Y_test'][:]\n X_test_DC = f['X_test_DC'][:]\n X_test_IC = f['X_test_IC'][:]\n reco_test = f['reco_test'][:]\n f.close()\n del f\n\n if Y_test_use is None:\n Y_test_use = Y_test\n X_test_DC_use = X_test_DC\n X_test_IC_use = X_test_IC\n reco_test_use = reco_test\n else:\n Y_test_use = numpy.concatenate((Y_test_use, Y_test))\n X_test_DC_use = numpy.concatenate((X_test_DC_use, X_test_DC))\n X_test_IC_use = numpy.concatenate((X_test_IC_use, X_test_IC))\n reco_test_use = numpy.concatenate((reco_test_use, reco_test))\n\nprint(Y_test_use.shape)\n\nprint(\"Saving output file: %s\"%output_file)\nf = h5py.File(output_file, \"w\")\nf.create_dataset(\"Y_test\", data=Y_test_use)\nf.create_dataset(\"X_test_DC\", data=X_test_DC_use)\nf.create_dataset(\"X_test_IC\", data=X_test_IC_use)\n#if use_old_reco:\nf.create_dataset(\"reco_test\", data=reco_test_use)\nf.close()\n","sub_path":"LowEnergyNeuralNetwork/make_test_file.py","file_name":"make_test_file.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"549309554","text":"import pandas as pd\nfrom random import randrange\nimport os\nimport time\nfrom pathlib import Path\nclass userGeneratorClass:\n \"\"\"\n Adibita al caricamento e gestione dei nomi da utilizzare per gli utenti\n \"\"\"\n\n def __init__(self, dataset_path):\n #caricamento files\n self.dataset = pd.read_csv(dataset_path, header=None, index_col=0)\n #generazione nome\n self.nome = self.dataset[1].sample().values[0]\n self.nome = str(self.nome).lower()\n #generazione cognome\n self.cognome = self.dataset[2].sample().values[0]\n self.cognome = str(self.cognome).lower()\n self.dati_anagrafici = self.anagrafica()\n #generazione username\n self.username = self.nome + self.cognome + str(randrange(1000))\n self.username = self.username[:30]\n self.username = self.username.replace(\" \", \"\") + \"@mail.com\"\n #creazione password composta come numero+nome+cognome+anno\n self.psw = (str(randrange(100)) + self.nome + self.cognome + self.dati_anagrafici.anno).replace(\" \", \"\")\n\n class anagrafica:\n mesi = [\"Gennaio\", \"Febbrio\", \"Marzo\", \"Aprile\", \"Maggio\", \"Giugno\", \"Luglio\", \"Agosto\", \"Settembre\", \"Ottobre\",\n \"Novembre\", \"Dicembre\"]\n def __init__(self):\n # GENERAZIONE ANAGRAFICA\n self.giorno = str(randrange(1, 28)) # fino a 28 per non sballare con febbraio\n self.anno = str(randrange(1930, 2000))\n indice = randrange(0, 11)\n self.mese = self.mesi[indice]\n\n\n\n\n\n","sub_path":"populating database pycharm/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"147360945","text":"#!/usr/bin/env python3\n\nimport os, sys, time, json\nimport shutil\nimport traceback\nfrom uuid import uuid4\nfrom threading import 
+{"seq_id":"147360945","text":"#!/usr/bin/env python3\n\nimport os, sys, time, json\nimport shutil\nimport traceback\nfrom uuid import uuid4\nfrom threading import Thread\nfrom configPath import testPath\n\n# master path #\nsys.path.insert(0,testPath)\n\n# Import wapi #\nfrom webwhatsapi import WhatsAPIDriver\nfrom webwhatsapi.objects.message import Message, MediaMessage\n\n# modules system #\nfrom models import _selenium,_wapi,observable\n\n# Variables #\ndriver = None\n\nprint(\"Start v2.0.0\")\n\nprint(\"Connection to selenium\")\n\ndriver = _selenium.con()\n\nprint(\"Check if a session cache exists\")\nsessionOn = _wapi.rememberSession(driver)\n\nprint(\"Session is {}\".format(sessionOn))\n\nif sessionOn == False:\n    print(\"Get qr code\")\n    qrName = _wapi.getQrCode(driver)\n\n    print(\"The QR file name is {}\".format(qrName))\n    if isinstance(qrName, str):\n        driver.wait_for_login(40)\n        driver.save_firefox_profile(True)\n    else:\n        print(\"session failed\")\n\nprint(\"Session success\")\n\nprint(\"General info of account\")\nprint(_wapi.getGeneralInfo(driver))\n\nprint(\"Get all chats\")\nprint(_wapi.getOldMessages(driver))\n\nprint(\"Start listener of new messages\")\ndriver.subscribe_new_messages(observable.NewMessageObserver(\"test\",driver))\n","sub_path":"Test/allProcess.py","file_name":"allProcess.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"389120931","text":"\"\"\"Saijil Nemchund\r\nNMCSAI001\r\nQuestion 2 \r\nProgram that is used to do vector calculations \"\"\" \r\n\r\nimport math #gaining access to the math library \r\na=[]\r\nb=[]\r\n#fill the vectors\r\nentA=input(\"Enter vector A:\\n\")\r\na.append(eval(entA.split()[0]))\r\na.append(eval(entA.split()[1]))\r\na.append(eval(entA.split()[2]))\r\nentB=input(\"Enter vector B:\\n\")\r\nb.append(eval(entB.split()[0]))\r\nb.append(eval(entB.split()[1]))\r\nb.append(eval(entB.split()[2]))\r\n#addition\r\nad1=str(a[0]+b[0])\r\nad2=str(a[1]+b[1])\r\nad3=str(a[2]+b[2])\r\nprint(\"A+B = [\"+ad1+\", \"+ad2+\", \"+ad3+\"]\")\r\n#multiplication\r\nmul1=a[0]*b[0]\r\nmul2=a[1]*b[1]\r\nmul3=a[2]*b[2]\r\ndot=str(mul1+mul2+mul3)\r\nprint(\"A.B = \"+dot)\r\n#norm of A\r\nsqA1=a[0]**2\r\nsqA2=a[1]**2\r\nsqA3=a[2]**2\r\nsqA=\"{0:.2f}\".format(math.sqrt((sqA1+sqA2+sqA3)))\r\nprint(\"|A| =\",sqA)\r\n#norm of B\r\nsqB1=b[0]**2\r\nsqB2=b[1]**2\r\nsqB3=b[2]**2\r\nsqB=\"{0:.2f}\".format(math.sqrt((sqB1+sqB2+sqB3)))\r\nprint(\"|B| =\",sqB)\r\n","sub_path":"examples/data/Assignment_6/nmcsai001/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
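The question2 record above unrolls every 3-vector operation by hand; zip and sum generalize the same arithmetic to any dimension. A sketch with hypothetical helper names:

import math

def dot(a, b):
    # pairwise products summed; works for any pair of equal-length vectors
    return sum(x * y for x, y in zip(a, b))

def norm(a):
    return math.sqrt(dot(a, a))

print(dot([1, 2, 3], [4, 5, 6]))          # 32
print("{0:.2f}".format(norm([3, 4, 0])))  # 5.00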
+{"seq_id":"610207623","text":"import voluptuous as vol\n\nfrom esphome.components import i2c, sensor\nimport esphome.config_validation as cv\nfrom esphome.const import CONF_ADDRESS, CONF_ID, CONF_NAME, CONF_RESOLUTION, \\\n    CONF_UPDATE_INTERVAL\nfrom esphome.cpp_generator import Pvariable, add\nfrom esphome.cpp_helpers import setup_component\nfrom esphome.cpp_types import App\n\nDEPENDENCIES = ['i2c']\n\nBH1750Resolution = sensor.sensor_ns.enum('BH1750Resolution')\nBH1750_RESOLUTIONS = {\n    4.0: BH1750Resolution.BH1750_RESOLUTION_4P0_LX,\n    1.0: BH1750Resolution.BH1750_RESOLUTION_1P0_LX,\n    0.5: BH1750Resolution.BH1750_RESOLUTION_0P5_LX,\n}\n\nBH1750Sensor = sensor.sensor_ns.class_('BH1750Sensor', sensor.PollingSensorComponent,\n                                       i2c.I2CDevice)\n\nPLATFORM_SCHEMA = cv.nameable(sensor.SENSOR_PLATFORM_SCHEMA.extend({\n    cv.GenerateID(): cv.declare_variable_id(BH1750Sensor),\n    vol.Optional(CONF_ADDRESS, default=0x23): cv.i2c_address,\n    vol.Optional(CONF_RESOLUTION): vol.All(cv.positive_float, cv.one_of(*BH1750_RESOLUTIONS)),\n    vol.Optional(CONF_UPDATE_INTERVAL): cv.update_interval,\n}).extend(cv.COMPONENT_SCHEMA.schema))\n\n\ndef to_code(config):\n    rhs = App.make_bh1750_sensor(config[CONF_NAME], config[CONF_ADDRESS],\n                                 config.get(CONF_UPDATE_INTERVAL))\n    bh1750 = Pvariable(config[CONF_ID], rhs)\n    if CONF_RESOLUTION in config:\n        add(bh1750.set_resolution(BH1750_RESOLUTIONS[config[CONF_RESOLUTION]]))\n    sensor.setup_sensor(bh1750, config)\n    setup_component(bh1750, config)\n\n\nBUILD_FLAGS = '-DUSE_BH1750'\n","sub_path":"esphome/components/sensor/bh1750.py","file_name":"bh1750.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"392239241","text":"import urllib.request\nimport os\n\ndef getFilename(link):\n    idx = link.rfind(\"/\")\n    idx = idx+1 if idx > -1 else 0\n    return link[idx:]\n\ndef downloadResource(url, folder=\"out/celebs\"):\n    # honour the folder argument instead of a hard-coded path\n    file = getFilename(url)\n    path = os.path.join(folder, file)\n    print(file, path)\n    os.makedirs(folder, exist_ok=True)  # make sure the target folder exists\n    if os.path.exists(path):\n        return  # skip already downloaded files (maybe check size > 0?)\n    urllib.request.urlretrieve(url, path)\n\nfor i in range(100):\n    url = \"http://s3.amazonaws.com/cadl/celeb-align/\" + str(i+1).zfill(6) + \".jpg\"\n    print(url)\n    downloadResource(url)","sub_path":"week_2/papparazi.py","file_name":"papparazi.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"619899606","text":"class fond_index_history:\n    __slots__='index_id','date','year','currency','open_price','min_price','max_price','close_price','volume'\n    def __init__(self,index_id,date,year,currency,open_price,min_price,max_price,close_price,volume):\n        self.index_id=index_id\n        self.year=year\n        self.date=date\n        self.currency=currency\n        self.open_price=open_price\n        self.min_price=min_price\n        self.max_price=max_price\n        self.close_price=close_price\n        self.volume=volume","sub_path":"webtrade/attributes/fond_index_history.py","file_name":"fond_index_history.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
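fond_index_history above declares __slots__; the practical effect is that instances carry no per-instance __dict__, which saves memory and turns misspelled attribute names into errors instead of silent new attributes. A small self-contained demonstration:

class WithSlots:
    __slots__ = ('x',)

class Plain:
    pass

p = Plain()
p.tyop = 1           # silently creates a brand-new attribute

s = WithSlots()
s.x = 1
try:
    s.tyop = 1       # __slots__ rejects attributes not listed
except AttributeError as e:
    print("rejected:", e)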
+{"seq_id":"290781734","text":"# Statement for enabling the development environment\nDEBUG = True\n'''\nDEBUG: setting this to True activates the debug mode on the app.\nThis allows us to use the Flask debugger in case of an unhandled\nexception, and also automatically reloads the application when it\nis updated. It should however always be set to False in production.\nIt defaults to False.\n'''\n\n#SQLALCHEMY_ECHO = True\n'''\nSQLALCHEMY_ECHO: setting this to True helps us with debugging by\nallowing SQLAlchemy to log errors.\n'''\n\n# Define the application directory\nimport os\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\nWTF_CSRF_ENABLED = True\nSECRET_KEY = os.urandom(32)\n'''\nPretty simple, it's just two settings that our Flask-WTF extension needs. The WTF_CSRF_ENABLED setting activates the cross-site request forgery prevention (note that this setting is enabled by default in current versions of Flask-WTF). In most cases you want to have this option enabled as it makes your app more secure.\n\nThe SECRET_KEY setting is only needed when CSRF is enabled, and is used to create a cryptographic token that is used to validate a form. When you write your own apps make sure to set the secret key to something that is difficult to guess.\n'''\n# creates database in the current folder\n#abspath-> absolute path function\n#print(BASE_DIR)-> prints the path of current directory\n#some constants get declared in the file :- __name__ __file__(current file name)\n\n\n# Define the database - we are working with\n# SQLite for this example\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, 'app.db')\n#SQLALCHEMY_DATABASE_URI = 'mysql://root:password@localhost/bookmyshow_db'\nSQLALCHEMY_TRACK_MODIFICATIONS = False\n\n#joins the path with app.db\nDATABASE_CONNECT_OPTIONS = {}\n\n# Application threads. A common general assumption is\n# using 2 per available processor core - to handle\n# incoming requests using one and performing background\n# operations using the other.\nTHREADS_PER_PAGE = 2\n\n# Enable protection against *Cross-site Request Forgery (CSRF)*\nCSRF_ENABLED = True\n\n# Use a secure, unique and absolutely secret key for\n# signing the data.\n#CSRF_SESSION_KEY = \"secret\"\n\n# Secret key for signing cookies\nSECRET_KEY = \"secret\"\n","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
param_group[\"momentum\"] = 0.9 # default momentum\n return\n\n\nclass ImagenetState(torchelastic.State):\n \"\"\"\n Client-provided State object; it is serializable and captures the entire\n state needed for executing one iteration of training\n \"\"\"\n\n def __init__(self, model, params, dataset, num_epochs, epoch=0):\n self.model = model\n self.params = params\n self.dataset = dataset\n self.total_batch_size = params.batch_per_device\n\n self.num_epochs = num_epochs\n self.epoch = epoch\n\n self.iteration = 0\n self.data_start_index = 0\n self.model_state = {}\n\n def sync(self, world_size, rank):\n self._sync_state(rank)\n\n # re-initialize model\n self._init_model()\n\n # re-initialize data loader\n self._init_data_loader()\n\n return self\n\n def capture_snapshot(self):\n # need only capture mutable fields\n snapshot = {}\n snapshot[\"epoch\"] = self.epoch\n snapshot[\"iteration\"] = self.iteration\n snapshot[\"data_start_index\"] = self.data_start_index\n snapshot[\"model_state\"] = copy.deepcopy(self.model_state)\n return snapshot\n\n def apply_snapshot(self, snapshot):\n self.epoch = snapshot[\"epoch\"]\n self.iteration = snapshot[\"iteration\"]\n self.data_start_index = snapshot[\"data_start_index\"]\n self.model_state = snapshot[\"model_state\"]\n\n def _sync_state(self, rank):\n # broadcast from the max rank with the biggest start index\n max_rank, _ = edist.all_gather_return_max_long(self.data_start_index)\n\n # Broadcast the state from max_rank\n buffer = io.BytesIO()\n self.save(buffer)\n state_tensor = torch.ByteTensor(list(buffer.getvalue()))\n state_size = torch.LongTensor([state_tensor.size()])\n dist.broadcast(state_size, src=max_rank)\n\n if rank != max_rank:\n state_tensor = torch.ByteTensor([0 for _ in range(state_size[0])])\n\n dist.broadcast(state_tensor, src=max_rank)\n\n buffer = io.BytesIO(state_tensor.numpy().tobytes())\n self.load(buffer)\n\n log.info(\n f\"Rank {rank}: Model state synced from rank: {max_rank}\\n\"\n f\"\\tbatch_size={self.total_batch_size}\\n\"\n f\"\\tnum_data_workers={self.params.num_data_workers}\\n\"\n f\"\\tdata_start_index={self.data_start_index}\\n\"\n f\"\\titeration={self.iteration}\\n\"\n f\"\\tepoch={self.epoch}/{self.num_epochs}\"\n )\n\n def _init_model(self):\n local_rank = dist.get_rank() % torch.cuda.device_count()\n\n self.dist_model = torch.nn.parallel.DistributedDataParallel(\n self.model,\n device_ids=[local_rank], # Tells DDP to work on a single GPU\n output_device=local_rank, # Tells DDP to work on a single GPU\n broadcast_buffers=False,\n check_reduction=True,\n )\n\n self.criterion = nn.CrossEntropyLoss().cuda()\n self.optimizer = torch.optim.SGD(\n self.dist_model.parameters(),\n self.params.base_learning_rate,\n momentum=0.9,\n weight_decay=1e-4,\n )\n\n if self.data_start_index > 0:\n self.dist_model.load_state_dict(self.model_state)\n\n def _data_iter_generator_fn(self, epoch):\n self.epoch = epoch\n sampler = ElasticDistributedSampler(\n dataset=self.dataset,\n num_replicas=dist.get_world_size(),\n rank=dist.get_rank(),\n start_index=self.data_start_index,\n )\n sampler.set_epoch(epoch)\n\n num_data_workers = self.params.num_data_workers\n self.data_loader = torch.utils.data.DataLoader(\n self.dataset,\n batch_size=self.total_batch_size,\n shuffle=(sampler is None),\n num_workers=num_data_workers,\n pin_memory=True,\n sampler=sampler,\n multiprocessing_context=None if num_data_workers == 0 else \"forkserver\",\n )\n\n return iter(self.data_loader)\n\n def _init_data_loader(self):\n self.data_iter = 
CyclingIterator(\n n=self.num_epochs,\n generator_fn=self._data_iter_generator_fn,\n start_epoch=self.epoch,\n )\n\n\ndef single_trainer(\n local_rank,\n max_world_size,\n c10d_backend,\n rdzv_init_url,\n model_arch,\n training_params,\n input_path,\n):\n \"\"\"\n Single GPU trainer that will only train on the GPU specified by local_rank\n\n \"\"\"\n\n log.info(f\"Loading data from: {input_path}\")\n normalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n )\n\n train_dataset = datasets.ImageFolder(\n input_path,\n transforms.Compose(\n [\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]\n ),\n )\n\n log.info(f\"Loading model: {model_arch}\")\n model = models.__dict__[model_arch]()\n # Apply ResNet training in one hour's tricks to the model itself\n # to maintain the accuracy\n for m in model.modules():\n # Trick 1: the last BatchNorm layer in each block need to\n # be initialized as zero gamma\n if isinstance(m, BasicBlock):\n num_features = m.bn2.num_features\n m.bn2.weight = Parameter(torch.zeros(num_features))\n if isinstance(m, Bottleneck):\n num_features = m.bn3.num_features\n m.bn3.weight = Parameter(torch.zeros(num_features))\n # Trick 2: linear layers are initialized by\n # drawing weights from a zero-mean Gaussian with\n # standard deviation of 0.01. In the paper it was only\n # fc layer, but in practice we found this better for\n # accuracy.\n if isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.01)\n\n model.train()\n\n torch.cuda.set_device(local_rank)\n device = torch.cuda.current_device()\n model.cuda()\n log.info(f\"Rank [{local_rank}] running on GPU [{device}]\")\n\n coordinator = CoordinatorP2P(\n c10d_backend=c10d_backend,\n init_method=rdzv_init_url,\n max_num_trainers=max_world_size,\n process_group_timeout=60000,\n )\n\n state = ImagenetState(\n model=model,\n params=training_params,\n dataset=train_dataset,\n num_epochs=training_params.num_epochs,\n )\n\n log.info(f\"Entering torchelastic train_loop\")\n torchelastic.train(coordinator, train_step, state)\n\n\ndef train_step(state: ImagenetState):\n \"\"\"\n The client-provided train_step(); it does one iteration of training\n \"\"\"\n\n start = time.time()\n input, target = next(state.data_iter)\n\n # This is needed because the world size may change between iterations\n world_size = dist.get_world_size()\n # Adjust the learning rate based on the epoch\n adjust_learning_rate(\n world_size,\n state.params,\n state.optimizer,\n state.epoch,\n len(state.data_loader),\n state.iteration,\n )\n\n target = target.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n target_var = torch.autograd.Variable(target)\n\n # Compute output\n output = state.dist_model(input_var)\n loss = state.criterion(output, target_var)\n\n # Compute gradient and do SGD step\n state.optimizer.zero_grad()\n loss.backward()\n state.optimizer.step()\n\n # Only log for \"local master\" - assumes homogeneous # gpus per node\n if dist.get_rank() % torch.cuda.device_count() == 0:\n data_idx = state.data_start_index + (state.iteration * state.total_batch_size)\n log.info(\n f\"epoch: {state.epoch}, iteration: {state.iteration}, data_idx: {data_idx}\"\n )\n\n state.data_start_index += world_size * state.total_batch_size\n state.iteration += 1\n state.model_state = state.dist_model.state_dict()\n\n end = time.time()\n # each train_step processes one mini_batch\n # measuring wall-clock time on the host may not be totally accurate\n # as 
CUDA kernels are asynchronous, this is for illustration purposes only\n batch_per_sec = 1 / (end - start)\n return state, torchelastic.SimpleWorkerStats(batch_per_sec)\n\n\ndef default_local_world_size():\n \"\"\"\n If CUDA is available, returns the number of GPU devices on the host.\n Otherwise returns 1.\n \"\"\"\n if torch.cuda.is_available():\n return torch.cuda.device_count()\n else:\n return 1\n\n\ndef default_device():\n \"\"\"\n gpu if this host has a GPU, otherwise cpu\n \"\"\"\n return \"gpu\" if torch.cuda.is_available() else \"cpu\"\n\n\ndef main():\n # these parameters should typically be set by the scheduler/resource manager\n # hence read them from environment variables rather than program args\n num_nodes = os.environ.get(\"SIZE\", 1)\n min_num_nodes = os.environ.get(\"MIN_SIZE\", num_nodes)\n max_num_nodes = os.environ.get(\"MAX_SIZE\", num_nodes)\n\n rdzv_endpoint = os.environ.get(\"RDZV_ENDPOINT\", \"localhost:2379\")\n job_id = os.environ.get(\"JOB_ID\", \"torchelastic_imagenet_example\")\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--input_path\",\n required=True,\n help=\"Path to the directory containing the dataset\",\n )\n\n parser.add_argument(\n \"--num_data_workers\", type=int, default=0, help=\"Number of data loader workers\"\n )\n parser.add_argument(\n \"--epochs\", type=int, default=1, help=\"Number of training epochs\"\n )\n parser.add_argument(\n \"--model_arch\", default=\"resnet101\", help=\"Model architecture (see)\"\n )\n\n parser.add_argument(\n \"--c10d_backend\", default=\"gloo\", choices=[\"gloo\", \"nccl\"], help=\"c10d backend\"\n )\n\n args = parser.parse_args()\n training_params = TrainParams(\n num_data_workers=args.num_data_workers,\n num_epochs=args.epochs,\n base_learning_rate=0.1,\n batch_per_device=32,\n benchmark_num_iter=500,\n benchmark_ddp_bucket_size=25,\n )\n\n local_world_size = default_local_world_size()\n min_world_size = local_world_size * min_num_nodes\n max_world_size = local_world_size * max_num_nodes\n rdzv_init_method = (\n f\"etcd://{rdzv_endpoint}/{job_id}\"\n f\"?min_workers={min_world_size}\"\n f\"&max_workers={max_world_size}\"\n f\"&last_call_timeout=5\"\n )\n\n log.info(f\"rdzv init method={rdzv_init_method}\")\n if local_world_size == 1:\n local_rank = 0\n single_trainer(\n local_rank,\n max_world_size,\n args.c10d_backend,\n rdzv_init_method,\n args.model_arch,\n training_params,\n args.input_path,\n )\n else:\n mp.spawn(\n fn=single_trainer,\n nprocs=local_world_size,\n args=(\n max_world_size,\n args.c10d_backend,\n rdzv_init_method,\n args.model_arch,\n training_params,\n args.input_path,\n ),\n )\n\n\nif __name__ == \"__main__\":\n mp.freeze_support()\n main()\n","sub_path":"examples/imagenet/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"} +{"seq_id":"617895780","text":"from approxeng.input import CentredAxis, TriggerAxis, Button, Controller, BinaryAxis\nfrom approxeng.input.selectbinder import ControllerResource\nfrom approxeng.input.controllers import ControllerRequirement, print_devices\n\n\nclass WirelessXBoxOnePad(Controller):\n \"\"\"\n Wireless XBox One controller, tested with the older controller that do not use bluetooth and are supplied with the\n XBox One 2014 through USB wire connection.\n \"\"\"\n\n def __init__(self, dead_zone=0.1, hot_zone=0.05):\n \"\"\"\n Create a new xbox one controller instance\n :param float dead_zone:\n Used to set the dead zone for each 
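adjust_learning_rate in the imagenet record above implements the linear-scaling-plus-warmup trick: over the first 5 epochs the rate ramps from base_lr up to world_size * base_lr, then decays by 10x every 30 epochs. Stripped of the optimizer plumbing, the schedule itself is just this (a sketch with hypothetical names, derived directly from the record's formula):

def warmup_lr(base_lr, world_size, epoch, num_iter, iter_index):
    # linear ramp from base_lr to world_size * base_lr across 5 epochs
    if epoch < 5:
        lr_step = (world_size - 1) * base_lr / (5.0 * num_iter)
        return base_lr + (epoch * num_iter + iter_index) * lr_step
    # afterwards: scaled rate decayed 10x every 30 epochs, capped at a 1e-3 factor
    factor = 0.1 ** (epoch // 30) if epoch < 80 else 0.1 ** 3
    return world_size * base_lr * factor

print(warmup_lr(0.0125, 8, 0, 100, 0))  # 0.0125 at the very first step
print(warmup_lr(0.0125, 8, 5, 100, 0))  # 0.1 = 8 * 0.0125 once warmup ends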
+{"seq_id":"617895780","text":"from approxeng.input import CentredAxis, TriggerAxis, Button, Controller, BinaryAxis\nfrom approxeng.input.selectbinder import ControllerResource\nfrom approxeng.input.controllers import ControllerRequirement, print_devices\n\n\nclass WirelessXBoxOnePad(Controller):\n    \"\"\"\n    Wireless XBox One controller, tested with the older controllers that do not use Bluetooth and ship with the\n    2014 XBox One, connected over a wired USB connection.\n    \"\"\"\n\n    def __init__(self, dead_zone=0.1, hot_zone=0.05):\n        \"\"\"\n        Create a new xbox one controller instance\n        :param float dead_zone:\n            Used to set the dead zone for each :class:`approxeng.input.CentredAxis` and\n            :class:`approxeng.input.TriggerAxis` in the controller.\n        :param float hot_zone:\n            Used to set the hot zone for each :class:`approxeng.input.CentredAxis` and\n            :class:`approxeng.input.TriggerAxis` in the controller.\n        \"\"\"\n        super(WirelessXBoxOnePad, self).__init__(\n            controls=[\n                Button(\"BTN_NORTH\", 307, sname='square'),\n                Button(\"BTN_WEST\", 308, sname='triangle'),\n                Button(\"BTN_B\", 305, sname='circle'),\n                Button(\"BTN_A\", 304, sname='cross'),\n                Button(\"BTN_THUMBR\", 318, sname='rs'),\n                Button(\"BTN_THUMBL\", 317, sname='ls'),\n                Button(\"BTN_SELECT\", 314, sname='select'),\n                Button(\"BTN_START\", 315, sname='start'),\n                Button(\"BTN_MODE\", 316, sname='home'),\n                Button(\"BTN_TL\", 310, sname='l1'),\n                Button(\"BTN_TR\", 311, sname='r1'),\n                CentredAxis(\"ABS_X\", -32768, 32767, 0, sname='lx'),\n                CentredAxis(\"ABS_Y\", -32768, 32767, 1, invert=True, sname='ly'),\n                CentredAxis(\"ABS_RX\", -32768, 32767, 3, sname='rx'),\n                CentredAxis(\"ABS_RY\", -32768, 32767, 4, invert=True, sname='ry'),\n                TriggerAxis(\"ABS_Z\", 0, 1023, 2, sname='lt', button_sname='l2', button_trigger_value=0.2),\n                TriggerAxis(\"ABS_RZ\", 0, 1023, 5, sname='rt', button_sname='r2', button_trigger_value=0.2),\n                BinaryAxis(\"ABS_HAT0X\", 16, b1name='dleft', b2name='dright'),\n                BinaryAxis(\"ABS_HAT0Y\", 17, b1name='dup', b2name='ddown')\n            ],\n            dead_zone=dead_zone,\n            hot_zone=hot_zone)\n\n    @staticmethod\n    def registration_ids():\n        \"\"\"\n        :return: list of (vendor_id, product_id) for this controller\n        \"\"\"\n        return [(0x45e, 0x2d1)]\n\n    def __repr__(self):\n        return 'Microsoft X-Box One pad'\n\n\n# moved out of the class body: these ran at class-definition time before\nprint('DEBUG: list controllers found')\nprint_devices()\n\n# Get a joystick\nwith ControllerResource(ControllerRequirement(require_class=WirelessXBoxOnePad)) as joystick:\n    # Loop until we're disconnected\n    while joystick.connected:\n        left_y = joystick['ly']\n        if left_y != 0:\n            print('left y is %s' % left_y)\n        if left_y == 1:\n            print('left y is %s' % left_y)\n        elif left_y == -1:\n            print('left y is %s' % left_y)\n        else:\n            print('left y is %s' % left_y)\n        # presses is an instance of approxeng.input.ButtonPresses\n        presses = joystick.check_presses()\n        if presses['square']:\n            print('SQUARE pressed since last check')\n        # We can also use attributes directly, and get at the presses object from the controller:\n        if joystick.presses.circle:\n            print('CIRCLE pressed since last check')\n        # Or we can use the 'x in y' syntax:\n        if 'triangle' in presses:\n            print('TRIANGLE pressed since last check')\n\n        # If we had any presses, print the list of pressed buttons by standard name\n        if joystick.has_presses:\n            print(joystick.presses)\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
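The CentredAxis entries above take dead_zone/hot_zone parameters. The underlying idea: readings inside the dead zone snap to 0, readings beyond the hot zone saturate at +/-1, and the band in between rescales linearly. A self-contained sketch of that mapping (my own construction, not approxeng's actual implementation):

def apply_zones(value, dead_zone=0.1, hot_zone=0.05):
    # value is a normalised axis reading in [-1, 1]
    sign = 1.0 if value >= 0 else -1.0
    mag = abs(value)
    if mag <= dead_zone:
        return 0.0
    if mag >= 1.0 - hot_zone:
        return sign
    # linearly rescale the active band to (0, 1)
    return sign * (mag - dead_zone) / (1.0 - hot_zone - dead_zone)

print(apply_zones(0.05))  # 0.0 (inside the dead zone)
print(apply_zones(0.97))  # 1.0 (inside the hot zone)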
+{"seq_id":"463987448","text":"class Solution:\n    def findShortestSubArray(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        degree = dict()\n        maxDegree = 0\n        for i in range(len(nums)):\n            degree.setdefault(nums[i], (-1, -1, 0))\n            first = degree[nums[i]][0]\n            num = degree[nums[i]][2]\n            if num == 0:\n                degree[nums[i]] = (i, -1, 1)\n            elif num == 1:\n                degree[nums[i]] = (first, i, 2)\n            else:\n                degree[nums[i]] = (first, i, num + 1)\n            if num + 1 >= maxDegree:\n                maxDegree = num + 1\n        ans = len(nums)\n        if maxDegree == 1:\n            return 1\n        else:\n            for key in degree.keys():\n                if degree[key][2] == maxDegree:\n                    if degree[key][1] - degree[key][0] + 1 <= ans:\n                        ans = degree[key][1] - degree[key][0] + 1\n            return ans\n\n\nif __name__ == '__main__':\n    s = Solution()\n    nums = [1, 2, 2, 3, 0, 4, 1]\n    print(s.findShortestSubArray(nums))\n","sub_path":"0697-Degree of an Array.py","file_name":"0697-Degree of an Array.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"417436303","text":"import cached_url\nfrom bs4 import BeautifulSoup\nimport yaml\n\ndomain = 'https://weixin.sogou.com'\naccount_search_prefix = '/weixin?type=1&query='\n\nwith open('credential') as f:\n\tcredential = yaml.load(f, Loader=yaml.FullLoader)\n\nclass SoupGet(object):\n\tdef getAccountNewArticle(self, name):\n\t\tcontent = cached_url.get(\n\t\t\tdomain + account_search_prefix + name, \n\t\t\theaders = {'cookie': credential['cookie']})\n\t\tsoup = BeautifulSoup(content, 'html.parser')\n\t\titem = soup.find('a', uigs='account_article_0')\n\t\tif not item:\n\t\t\twith open('tmp/account.html', 'w') as f:\n\t\t\t\tf.write(content)\n\t\treturn item and domain + item['href']\n\n\tdef getArticleUrl(self, url):\n\t\tcontent = cached_url.get(\n\t\t\turl,\n\t\t\tforce_cache=True,\n\t\t\theaders = {'cookie': credential['cookie']})\n\t\t# As an anti-scraping measure, Sogou returns a snippet of js; parse that js here\n\t\tparts = content.split(\"url += '\")\n\t\turl = []\n\t\tfor x in parts[1:]:\n\t\t\turl.append(x.split(\"';\")[0])\n\t\turl = ''.join(url).replace('@', '')\n\t\treturn url","sub_path":"soup_get/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
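getArticleUrl in the soup_get record above reassembles the obfuscated URL by repeatedly splitting on "url += '". A regex does the same in one pass; a small sketch of that alternative (same logic, different tool):

import re

def parse_obfuscated_url(content):
    # collect every fragment appended via: url += '...';
    pieces = re.findall(r"url \+= '([^']*)'", content)
    return ''.join(pieces).replace('@', '')

js = "var url = ''; url += 'http://mp.'; url += 'wei@xin.qq.com/s';"
print(parse_obfuscated_url(js))  # http://mp.weixin.qq.com/s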
+{"seq_id":"261066453","text":"import socket\nimport os\nimport numpy as np\nfrom os.path import join\n\n# Global parameters\nLEN_SAMPLE = 50\nNONOVERLAP = 25\nN_KPTS = C = 14\nCAMERA_HEIGHT = CAM_H = 1232\nCAMERA_WIDTH = CAM_W = 1640\n\n# The net_resolution parameter of OpenPose. It should be a multiple of 16\nOP_NET_W = 448\nOP_NET_H = 336\nOP_POSE_TYPE = \"COCO\"\n\nNUM_CAMERAS = 8\n\nDLT_MAX_ITERS = 10\nDLT_MAX_ERROR_RATIO = 10\n\nHEAD_HEATMAP_INDICES = [0, 14, 15, 16, 17]\nMASK_RADIUS = 0.0\nN_DIGITS = 6\nCONFIDENCE_THREASHOLD = 0.6\nCONFIDENCE_3D_THREASHOLD = 0.5\nLINE_DIST_THRESHOLD = 250\nPROJ_DIST_THRESHOLD = 100\nMIN_DETECTED_POINTS = 5\nMIN_ASSOCIATE_POINTS = 2\nBNDRY = 0.03\nMAX_NUM_BBOX = 10\n\nCAMERA_FPS = 15\nRF_FPS = 30\n\nCAMERA_MIN_VIEWS = 2\n\nBOX_MARGIN = 0\nBOX_X_3D = [-5000 - BOX_MARGIN, 5000 + BOX_MARGIN]\nBOX_Y_3D = [-1500 - BOX_MARGIN, 1500 + BOX_MARGIN]\nBOX_R_3D = [1000 - BOX_MARGIN, 11000 + BOX_MARGIN]\nBBOX_EXTEND = 200\n\nHOR_SHAPE = (2, 200, 200)\nVER_SHAPE = (2, 60, 200)\nINPUT_SHAPE = (HOR_SHAPE[1], VER_SHAPE[1], HOR_SHAPE[2])\nRF_HOR_BYTES = np.prod(HOR_SHAPE) * 4\nRF_VER_BYTES = np.prod(VER_SHAPE) * 4\nRF_FRAME_BYTES = RF_HOR_BYTES + RF_VER_BYTES\n\nHEATMAP_X = np.linspace(-5, 5, 200)\nHEATMAP_Y = np.linspace(-1.5, 1.5, 60)\nHEATMAP_Z = np.linspace(1, 11, 200)\nassert len(HEATMAP_X) == HOR_SHAPE[1]\nassert len(HEATMAP_Y) == VER_SHAPE[1]\nassert len(HEATMAP_Z) == HOR_SHAPE[2] and len(HEATMAP_Z) == VER_SHAPE[2]\nKPT_INDS2NAME = [\"Neck\", \"RShoulder\", \"RElbow\", \"RWrist\", \"LShoulder\", \"LElbow\", \"LWrist\",\n                 \"RHip\", \"RKnee\", \"RAnkle\", \"LHip\", \"LKnee\", \"LAnkle\", \"Head\"]\n\nKPT_NAME2INDS = {\"Neck\": 0, \"RShoulder\": 1, \"RElbow\": 2, \"RWrist\": 3, \"LShoulder\": 4, \"LElbow\": 5, \"LWrist\": 6,\n                 \"RHip\": 7, \"RKnee\": 8, \"RAnkle\": 9, \"LHip\": 10, \"LKnee\": 11, \"LAnkle\": 12, \"Head\": 13}\n\n# spatial output size of the feature net.\nBU_HEATMAP_HOR_SHAPE, BU_HEATMAP_VER_SHAPE = (200, 200), (60, 200)\nBU_HEATMAP_SHAPE = (BU_HEATMAP_HOR_SHAPE[0], BU_HEATMAP_VER_SHAPE[0], BU_HEATMAP_HOR_SHAPE[1])\nBU_DOWN_SAMPLE_RATE = [i / float(j) for i, j in zip(INPUT_SHAPE, BU_HEATMAP_SHAPE)]\n\n# Machine-dependent paths\nname = socket.gethostname()\n\n# Non-linear coefficients of the antenna array\nNON_LINEAR_COEFFICIENTS = \"/data/netmit/rf-vision/3d/nonlinear_array_coef/nonlinear_coefficients.npz\"\nNON_LINEAR_COEFFICIENTS_XYR = \"/data/netmit/rf-vision/3d/nonlinear_array_coef/nonlinear_coefficients_xyr.npz\"\n\nRAW_DATA_PATH = \"/data/netmit/rf-vision/3d/raw\"\nDATA_PATH = \"/data/netmit/rf-vision/3d/processed\"\nLIB_OPENPOSE_PATH = \"/usr/local/openpose\"\nVIZ_PATH = \"/data/netmit/rf-vision/3d/processed/viz\"\nCODE_BACKUP_PATH = \"/afs/csail.mit.edu/u/m/mingmin/backup/rf-vision/3d\"\n\nIMAGES_DATA_PATH = \"/data/scratch-oc40/rf-vision/3d/processed\"\n\n# Remote (laptop) data folders\nREMOTE_RAW_CALIBRATE_FOLDER = join(\"{log_path}\", \"{loc}\", \"calibration\")\nREMOTE_RAW_CAMERA_FOLDER = join(\"{log_path}\", \"{loc}\", \"{exp}\", )\nREMOTE_RAW_RF_FOLDER = join(\"{log_path}\", \"{loc}\", \"{exp}\")\nREMOTE_RAW_CALIBRATE_PARAMS = join(\"{log_path}\", \"{loc}\", \"camera_matrices\")\n\n# Raw data folders\nRAW_CALIBRATE_TAR_FILE = join(RAW_DATA_PATH, \"calibration\", \"{loc}.tar.gz\")\nRAW_CAMERA_LOCATION_FOLDER = join(RAW_DATA_PATH, \"camera\", \"{loc}\")\nRAW_CAMERA_EXP_FOLDER = join(RAW_DATA_PATH, \"camera\", \"{loc}\", \"{exp}\")\nRAW_CAMERA_FOLDER = join(RAW_DATA_PATH, \"camera\", \"{loc}\", \"{exp}\", \"{cam}\")\nRAW_RF_FOLDER = join(RAW_DATA_PATH, \"rf\", \"{loc}\", \"{exp}\")\nRAW_RF_TEMP_FOLDER = join(\"/tmp\", \"rf\", \"{loc}\", \"{exp}\")\n\nSYNC_CALIBRATE_FOLDER = join(DATA_PATH, \"sync\", \"calibration\", \"{loc}\")\nSYNC_CAMERA_EXP_FOLDER = join(IMAGES_DATA_PATH, \"sync\", \"camera\", \"fps-{fps}\", \"{loc}\", \"{exp}\")\nSYNC_CAMERA_EXP_FOLDER_LMDB = join(IMAGES_DATA_PATH, \"sync\", \"camera\", \"fps-{fps}\", \"{loc}\", \"{exp}-lmdb\")\nSYNC_CAMERA_FOLDER = join(IMAGES_DATA_PATH, \"sync\", \"camera\", \"fps-{fps}\", \"{loc}\", \"{exp}\", \"{cam}\")\nSYNC_CAMERA_FOLDER_LMDB = join(IMAGES_DATA_PATH, \"sync\", \"camera\", \"fps-{fps}\", \"{loc}\", \"{exp}-lmdb\", \"{cam}\")\nSYNC_CAMERA_FOLDER_TMP = join(\"/tmp\", \"sync\", \"camera\", \"fps-{fps}\", \"{loc}\", \"{exp}\", \"{cam}\")\nSYNC_RF_FOLDER_BIN = join(DATA_PATH, \"sync\", \"rf\", \"{array_type}\", \"fps-{fps}\", \"{loc}\", \"{exp}-bin\")\nSYNC_RF_SUB_MEDIAN_FOLDER = join(DATA_PATH, \"sync\", \"rf-sub-median\", \"{array_type}\", \"fps-{fps}\", \"{loc}\", \"{exp}\")\nSYNC_RF_MEDIAN_NPZ = join(DATA_PATH, \"sync\", \"rf-median\", \"{array_type}\", \"fps-{fps}\", \"{loc}-{exp}.npz\")\n\nOPENPOSE_HTMP_EXP_FOLDER = join(DATA_PATH, \"openpose\", \"heatmap\", \"fps-{fps}\", \"{loc}\", \"{exp}\")\nOPENPOSE_LOCAL_HTMP_FOLDER = join(\"/tmp\", \"openpose\", \"heatmap\", \"fps-{fps}\", \"{loc}\", \"{exp}\", \"{cam}\")\nOPENPOSE_KPT_EXP_FOLDER_LMDB = join(DATA_PATH, \"openpose\", \"keypoint\", \"fps-{fps}\", \"{loc}\", \"{exp}-lmdb\")\nOPENPOSE_KPT_FOLDER_LMDB = join(DATA_PATH, \"openpose\", \"keypoint\", \"fps-{fps}\", \"{loc}\", \"{exp}-lmdb\", \"{cam}\")\nOPENPOSE_LOCAL_KPT_FOLDER = join(\"/tmp\", \"openpose\", \"keypoint\", \"fps-{fps}\", \"{loc}\", \"{exp}\", \"{cam}\")\nOPENPOSE_LOCAL_KPT_JSON_FOLDER = join(\"/tmp\", \"openpose\", \"keypoint-json\", \"fps-{fps}\", \"{loc}\", \"{exp}\", \"{cam}\")\n\nCAMERA_PARAMS_FOLDER = join(DATA_PATH, \"camera-parameters\", \"{loc}\")\nSKELETON_3D_FOLDER = join(DATA_PATH, \"skeleton3d\", \"fps-{fps}\", \"{loc}\", \"{exp}\")\nSKELETON_3D_RAW_FILE = join(DATA_PATH, \"skeleton3d\", \"fps-{fps}\", \"{loc}\", \"{exp}\", \"skeleton_raw.json\")\nSKELETON_3D_SMOOTHED_FILE = join(DATA_PATH, \"skeleton3d\", \"fps-{fps}\", \"{loc}\", \"{exp}\", \"skeleton_smoothed.json\")\nSKELETON_3D_SMOOTHED_LMDB = join(DATA_PATH, \"skeleton3d\", \"fps-{fps}\", \"{loc}\", \"{exp}\", \"skeleton-smoothed-lmdb\")\nVIZ_FOLDER = join(VIZ_PATH, \"views-{views}\", \"fps-{fps}\", \"{loc}\", \"{exp}\")\n\nRAW_VIDEO_FOLDER = join(DATA_PATH, \"raw-video\", \"raw\", \"{loc}\", \"{exp}\")\nCAMERA_SYSTEM_VIDEO_FOLDER = join(DATA_PATH, \"raw-video\", \"cameras\", \"{loc}\", \"{exp}\")\n\n# Trained models\nMODEL_PATH = \"/data/netmit/rf-vision/3d/models\"\n\nCHECKPOINT_PATH = join(\"/data/netmit/rf-vision/3d/models\", \"{model_name}\", \"checkpoint_{epoch}.pth.tar\")\n\n# Prediction Folder Structures\nPREDICTION_PATH = \"/data/netmit/rf-vision/3d/predictions\"\n\nPRED_MODEL_PATH = join(PREDICTION_PATH, \"{model_name}\", \"{epoch}\")\nPREDICTION_FOLDER = join(PREDICTION_PATH, \"{model_name}\", \"{epoch}\", \"{loc}\", \"{exp}\")\n\nPRED_SKELETON_3D_FILE = join(PREDICTION_FOLDER, \"skeleton_raw.json\")\nPRED_SKELETON_3D_SMOOTHED_FILE = join(PREDICTION_FOLDER, \"skeleton_smoothed.json\")\n\nPRED_RCNN_BBOX_FILE = join(PREDICTION_FOLDER, \"rcnn_bbox.json\")\nPRED_RPN_BBOX_FILE = join(PREDICTION_FOLDER, \"rpn_bbox.json\")\n\n# Dataset configuration\nDATA_CONFIG_PATH = \"/data/netmit/rf-vision/3d/config\"\nDATASET_PATH = join(DATA_CONFIG_PATH, \"datasets\")\nDATASET_LEN_FILE = join(DATA_CONFIG_PATH, \"dataset_len.json\")\n\n\"\"\" keypoint \"\"\"\n\nKPT_NAMES = [\n    \"neck\",\n    \"R shoulder\",\n    \"R elbow\",\n    \"R wrist\",\n    \"L shoulder\",\n    \"L elbow\",\n    \"L wrist\",\n    \"R hip\",\n    \"R knee\",\n    \"R ankle\",\n    \"L hip\",\n    \"L knee\",\n    \"L ankle\",\n    \"head\"]\n\nKPT_LINE = [\n    (\"head\", \"neck\"),\n    (\"neck\", \"L shoulder\"),\n    (\"neck\", \"R shoulder\"),\n    (\"L shoulder\", \"L elbow\"),\n    (\"R shoulder\", \"R elbow\"),\n    (\"L elbow\", \"L wrist\"),\n    (\"R elbow\", \"R wrist\"),\n    (\"neck\", \"L hip\"),\n    (\"neck\", \"R hip\"),\n    (\"L hip\", \"R hip\"),\n    (\"L hip\", \"L knee\"),\n    (\"R hip\", \"R knee\"),\n    (\"R knee\", \"R ankle\"),\n    (\"L knee\", \"L ankle\")\n]\n\nKPT_LINE_IDX = [(KPT_NAMES.index(t[0]), KPT_NAMES.index(t[1])) for t in KPT_LINE]\n","sub_path":"config_rf.py","file_name":"config_rf.py","file_ext":"py","file_size_in_byte":7301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"207329740","text":"import speech_recognition as sr\nimport os\nimport time\n\ntime.sleep(1)\n\nr1 = sr.Recognizer()\n\nwhile True:\n    # 'a+' appends; the with-block guarantees the handle is closed even when\n    # recognition fails and we continue to the next iteration\n    with open(\"./temp/tempfile2.txt\", 'a+') as f:\n        with sr.Microphone() as source:\n            #print('\\nSPEAK NOW\\n')\n            r1.adjust_for_ambient_noise(source, duration=0.2)\n            audio = r1.listen(source, phrase_time_limit = 3)\n            text = r1.recognize_google(audio, language=\"en-IN\", show_all=True)\n            try:\n                alternatives = text['alternative']\n                for i in alternatives:\n                    s = i[\"transcript\"].lower()\n                    k = s.split(\" \")\n                    for j in k:\n                        f.write(str(j)+'\\n')\n            except Exception:\n                continue\n","sub_path":"background/bgrun2.py","file_name":"bgrun2.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"589774235","text":"import os\nimport json\nimport requests\n\nfrom django.conf import settings\n\nfrom .models import MeetupEvent\n\n\ndef fetch_events():\n    \"\"\"Queries the Meetup API for events that a user has RSVPed.\"\"\"\n    url = \"https://api.meetup.com/self/events\"\n    response = requests.get(url, params={\"key\": os.environ['MEETUP_API_KEY']})\n    events = json.loads(response.text)\n    return events\n\n\ndef store_events(events):\n    \"\"\"Stores a list of Meetup events in the database.\"\"\"\n    for event in events:\n        event_data = {\n            'event_id': event['id'],\n            'event_name': event['name'],\n            'event_status': event['status'],\n            'event_link': event['link'],\n            'group_name': event['group']['name'],\n            'group_urlname': event['group']['urlname']\n        }\n        MeetupEvent.objects.update_or_create(\n            event_id = event['id'],\n            defaults = event_data\n        )\n    print(\"\\nMeetup event(s) storage completed!\\n\")\n\n\ndef delete_all_events():\n    MeetupEvent.objects.all().delete()\n\n\ndef fetch_and_store_events():\n    store_events(fetch_events())\n","sub_path":"robinsonc/apps/meetup/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"222620616","text":"from collections import *\nfrom functools import reduce\nimport copy\nimport itertools\nimport random\nimport sys\n\ndef rot(g):\n    g = list(list(r) for r in g)\n    n = len(g)\n    assert(n == len(g[0]))\n    out = [[None for _ in range(n)] for _ in range(n)]\n    for i in range(n):\n        for j in range(n):\n            out[i][j] = g[n - j - 1][i]\n\n    return tuple(''.join(row) for row in out)\n\ndef flip(g):\n    g = list(list(r) for r in g)\n    n = len(g)\n    assert(n == len(g[0]))\n    out = [[None for _ in range(n)] for _ in range(n)]\n    for i in range(n):\n        for j in range(n):\n            out[i][j] = g[j][i]\n\n    return tuple(''.join(row) for row in out)\n\ndef extract(grid, i, j, s):\n    return tuple(grid[i + x][j:j+s] for x in range(s))\n\ndef place(mat, i, j, block):\n    n = len(block)\n    for x in range(n):\n        for y in range(n):\n            mat[i + x][j + y] = block[x][y]\n\ndef expand(rules, grid):\n    n = len(grid)\n    sz = 0\n    if n % 2 == 0:\n        sz = 3 * (n // 2)\n    else:\n        sz = 4 * (n // 3)\n    out = [[None for _ in range(sz)] for _ in range(sz)]\n    if n % 2 == 0:\n        for i in range(0, n, 2):\n            for j in range(0, n, 2):\n                seed = extract(grid, i, j, 2)\n                place(out, 3 * (i // 2), 3 * (j // 2), rules[seed])\n    else:\n        for i in range(0, n, 3):\n            for j in range(0, n, 3):\n                seed = extract(grid, i, j, 3)\n                place(out, 4 * (i // 3), 4 * (j // 3), rules[seed])\n\n    return tuple(''.join(row) for row in out)\n\ndef main():\n    m = {}\n    for line in sys.stdin:\n        rule = line.strip().split(' => ')\n        pred = tuple(rule[0].split('/'))\n        suff = tuple(rule[1].split('/'))\n\n        for _ in range(2):\n            for _ in range(4):\n                m[pred] = suff\n                pred = rot(pred)\n\n            pred = flip(pred)\n\n    grid = ['.#.', '..#', '###']\n    for _ in range(18):\n        grid = expand(m, grid)\n\n    ans = 0\n    for row in grid:\n        ans += sum(1 for c in row if c == '#')\n\n    print(ans)\n\nmain()\n","sub_path":"advent/2017/day21.py","file_name":"day21.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"434473584","text":"\"\"\"\n9.4 Write a program to read through the mbox-short.txt and figure out who has sent the greatest number of \nmail messages. The program looks for 'From ' lines and takes the second word of those lines as the person \nwho sent the mail. The program creates a Python dictionary that maps the sender's mail address to a count of \nthe number of times they appear in the file. After the dictionary is produced, \nthe program reads through the dictionary using a maximum loop to find the most prolific committer.\n\"\"\"\n\nfile_h = open(\"mbox-short.txt\")\nnames_list = []\nnames_dict = {}\n\nfor line in file_h:\n    if line.startswith(\"From \"):\n        line_list = line.split()\n        names = line_list[1]\n        names_list.append(names)\n    else:\n        continue\n\n#print(names_list)\nfor name in names_list:\n    names_dict[name] = names_dict.get(name,0) + 1\n\nbig_name = list(names_dict.keys())\nbig_count = list(names_dict.values())\n\nmax_index = big_count.index(max(big_count))\nprint(\"dict:\", big_name[max_index],big_count[max_index])","sub_path":"course_era/python-datastructures/week5_chap9-dcitionary/5.7.1.py","file_name":"5.7.1.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
+{"seq_id":"395192022","text":"def find_majority(arr):\n    res, count = 0, 1\n    n = len(arr)\n    for i in range(1, n):\n        if arr[res] == arr[i]:\n            count += 1\n        else:\n            count -= 1\n        if count == 0:\n            res = i\n            count = 1\n    count = 0\n    for i in range(n):\n        if arr[i] == arr[res]:\n            count += 1\n    if count <= n//2:\n        return -1\n    return arr[res]  # return the majority value itself, not its index\n\narr = list(map(int, input('Enter array: ').split()))\nprint(f'Majority element in array is {find_majority(arr)}')","sub_path":"Searching/Majority Element.py","file_name":"Majority Element.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
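The rot/flip helpers in the day21 record above build an explicit output matrix; for square grids of strings the same transforms fall out of zip. A compact alternative sketch, equivalent to the record's index arithmetic:

def rot(g):
    # 90-degree clockwise rotation: reverse the rows, then transpose,
    # i.e. out[i][j] == g[n - j - 1][i]
    return tuple(''.join(col) for col in zip(*g[::-1]))

def flip(g):
    # transpose (mirror across the main diagonal): out[i][j] == g[j][i]
    return tuple(''.join(col) for col in zip(*g))

grid = ('.#.', '..#', '###')
print(rot(grid))  # ('#..', '#.#', '##.')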
+{"seq_id":"80813146","text":"#!/usr/bin/env python2\n\n\nimport sys, os, time\nimport shutil\nfrom pdb import set_trace\nfrom glob import glob\nfrom argparse import ArgumentParser\nimport re\n#import URAnalysis.Utilities.prettyjson as prettyjson\n#from URAnalysis.PlotTools.data_views import get_best_style, log #not for the same purpose, but the same mechanism\n#import rootpy\n#log.setLevel(log.CRITICAL)\n\nswdir = os.path.realpath(os.environ['CMSSW_BASE'])\njobid = 'jobid' #os.environ['jobid']\ninputdir = os.path.join(swdir, 'inputs')\ninputdir = os.path.join(inputdir, jobid)\n\nparser = ArgumentParser('submit analyzer to the batch queues')\nparser.add_argument('configfile')\nparser.add_argument('jobdir')\nparser.add_argument('--file',default='samples.cfg',help='file containing a sample list')\nparser.add_argument('--nosubmit',default=False,action='store_true',help='no submission')\n\nargs = parser.parse_args()\n\n\nif os.path.isdir(args.jobdir):\n    print(args.jobdir + ' exists: EXIT')\n    sys.exit(-1)\nos.mkdir(args.jobdir)\nconfigFile=os.path.abspath(args.configfile)\nshutil.copy(configFile, args.jobdir)\nconfigFile=os.path.abspath(os.path.join(args.jobdir, os.path.basename(configFile)))\nprint ('submitting jobs for '+configFile)\n\nsamplesdir='DeepNTuples.DeepNtuplizer.samples.'\n\n#format: njobs sample output args1 args2 ... (simple whitespace)\nlines = [line.rstrip('\\n') for line in open(args.file)]\n\nfor sampledescription in lines:\n\n    if sampledescription.strip().startswith(\"#\"):\n        continue\n\n    entries= [s.strip() for s in sampledescription.split(' ') if s]\n    if len(entries) < 3:\n        continue\n\n    nJobs=entries[0]\n    sample=samplesdir+entries[1]\n    outputFile=entries[2]\n    jobargs=''\n    if len(entries) >3:\n        jobargs=entries[3]\n    jobpath = os.path.join(\n        args.jobdir, \n        outputFile\n    )\n    jobpath=os.path.abspath(jobpath)\n    os.mkdir(jobpath) \n    sheelscp=os.path.abspath(os.path.join(jobpath, 'batchscript.sh'))\n\n    #create full output path on eos\n    cernboxpath='/eos/user/'+os.environ['USER'][0]+'/'+os.environ['USER']+'/'\n    #print (cernboxpath)\n    ntupleOutDir=cernboxpath+'DeepNtuples/'+time.strftime('%a_%H%M%S')+'_'+args.jobdir+'/'+outputFile+'/'\n    os.makedirs(ntupleOutDir)\n    #print (ntupleOutDir)\n\n    #link to ntupleOutDir\n    os.symlink(ntupleOutDir,jobpath+'/output')\n\n    condorfile =\"\"\"executable = batchscript.sh\narguments = {configfile} inputScript={sample} outputFile={ntupledir}{outputfile} nJobs={njobs} job=$(ProcId) {options}\noutput = batch/con_$(ClusterId).$(ProcId).out\nerror = batch/con_$(ClusterId).$(ProcId).err\nlog = batch/con_$(ClusterId).$(ProcId).log\nsend_credential = True\nuse_x509userproxy = True\nqueue {njobs}\n\"\"\".format(\n        batchscriptpath=sheelscp,\n        configfile=configFile, \n        sample=sample,\n        ntupledir=ntupleOutDir,\n        outputfile=outputFile,\n        njobs=nJobs, \n        options=jobargs\n        )\n\n    conf = open(os.path.join(jobpath, 'condor.sub'), 'w')\n    conf.write(condorfile)\n    conf.close()\n    print(\"wrote condor file for \"+outputFile)\n    os.mkdir(jobpath+'/batch')\n\n\n    #create script\n    shellscript = \"\"\"#!/bin/bash\necho \"JOBSUB::RUN job running\"\ntrap \"echo JOBSUB::FAIL job killed\" SIGTERM\ncd {basedir}\neval `scramv1 runtime -sh`\ncd {jobdir}\ncmsRun \"$@\"\nexitstatus=$?\nif [ $exitstatus != 0 ]\nthen\necho JOBSUB::FAIL job failed with status $exitstatus\nelse\necho JOBSUB::SUCC job ended successfully\nfi\n    \"\"\".format(\n        basedir=swdir,\n        jobdir=os.path.abspath(args.jobdir)\n    )\n\n    shellsc = open(sheelscp, 'w')\n    shellsc.write(shellscript)\n    shellsc.close()\n    os.system('chmod +x '+sheelscp)\n    if not args.nosubmit:\n        os.system('cd ' + jobpath + ' && condor_submit condor.sub') # gives back submitted to cluster XXX message - use\n\n\n\n\nexit()\n","sub_path":"DeepNtuplizer/scripts/jobSub.py","file_name":"jobSub.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"53"}
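jobSub.py above shells out through os.system with string concatenation, which breaks on paths containing spaces and silently swallows non-zero exit codes. A sketch of the same submission step via subprocess (cwd replaces the cd, check=True surfaces failures; the helper name is hypothetical):

import subprocess

def submit(jobpath):
    # run condor_submit inside the job directory; raises CalledProcessError on failure
    subprocess.run(['condor_submit', 'condor.sub'], cwd=jobpath, check=True)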